#!/usr/bin/env python
from __future__ import print_function
from optparse import OptionParser
import os
import sys
class ReadsSplitter:
def __init__(self):
self.options = None
self.files_to_split = []
self.getOptions()
def go(self):
for fn in self.files_to_split:
self.splitFile(fn)
def getOptions(self):
parser = OptionParser()
parser.add_option("-u", "--unaligned", dest="unaligned_dir", \
help="Unaligned read directory", metavar="DIR")
parser.add_option("-o", "--output", dest="output_dir",\
help="Directory for output", metavar="DIR",\
default="data/output/breakpoints/reads")
(options, args) = parser.parse_args()
self.options = options
if options.unaligned_dir:
for file_name in os.listdir(options.unaligned_dir):
if 'unaligned' in file_name:
                    self.files_to_split.append(os.path.join(options.unaligned_dir, file_name))
def splitFile(self, fn):
if not os.path.isfile(fn):
warning("%s DOES NOT EXIST" %(fn))
exit(1)
        read_split_output_dir = self.options.output_dir
        ensure_dir(read_split_output_dir)
        read_split_output_1 = os.path.join(read_split_output_dir, os.path.split(fn)[1] + ".1")
        read_split_output_2 = os.path.join(read_split_output_dir, os.path.split(fn)[1] + ".2")
read_file = open(fn, 'r')
r_o_1 = open(read_split_output_1, 'w')
r_o_2 = open(read_split_output_2, 'w')
for read in self.read_read(read_file):
h1 = read[0].strip()
read_contents = read[1].strip()
h2 = read[2].strip()
read_quality = read[3].strip()
#
l = len(read_contents)
l_1 = int(l / 3)
l_2 = int(l - l_1)
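            # Note: only the first third and the last third of the read are
            # kept; the middle of the read is never written to either output.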
# left
h1_1 = h1 + "/1\n"
read_contents_1 = read_contents[0:l_1] + "\n"
h2_1 = h2 + "/1\n"
read_quality_1 = read_quality[0:l_1] + "\n"
# right
h1_2 = h1 + "/2\n"
read_contents_2 = read_contents[l_2:]+ "\n"
h2_2 = h2 + "/2\n"
read_quality_2 = read_quality[l_2:] + "\n"
r_o_1.write(h1_1)
r_o_1.write(read_contents_1)
r_o_1.write(h2_1)
r_o_1.write(read_quality_1)
r_o_2.write(h1_2)
r_o_2.write(read_contents_2)
r_o_2.write(h2_2)
r_o_2.write(read_quality_2)
r_o_1.close()
r_o_2.close()
read_file.close()
def read_read(self, fp):
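        # Yield successive 4-line FASTQ records (header, sequence, '+', quality)
        # until the end of the file is reached.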
while True:
read_bundle = []
for i in range(4):
read_bundle.append(fp.readline())
if not read_bundle[0]:
break
else:
yield read_bundle
def ensure_dir(d):
    # Create the directory itself (including parents) if it does not exist yet.
    if not os.path.exists(d):
        os.makedirs(d)
def warning(*objs):
    print("\tWARNING: ", *objs, file=sys.stderr)
def main():
'''
splits read files for breakpoint
'''
splitter = ReadsSplitter()
splitter.go()
if __name__=='__main__':
main()
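
# Usage sketch (the script name and paths are illustrative, not from the
# original source):
#
#   python split_reads.py -u data/unaligned/ -o data/output/breakpoints/reads/
#
# Every file in the unaligned directory whose name contains "unaligned" is
# split into <name>.1 (first third of each read, headers tagged /1) and
# <name>.2 (last third, headers tagged /2) in the output directory.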
# -*- coding: utf-8 -*-
'''
Copyright (c) 2019 Colin Curtain
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
Author: Colin Curtain (ccbogel)
https://github.com/ccbogel/QualCoder
https://qualcoder.wordpress.com/
'''
from PyQt5 import QtWidgets, QtCore
import os
import sys
import logging
import traceback
from GUI.ui_dialog_information import Ui_Dialog_information
path = os.path.abspath(os.path.dirname(__file__))
logger = logging.getLogger(__name__)
def exception_handler(exception_type, value, tb_obj):
""" Global exception handler useful in GUIs.
tb_obj: exception.__traceback__ """
tb = '\n'.join(traceback.format_tb(tb_obj))
text = 'Traceback (most recent call last):\n' + tb + '\n' + exception_type.__name__ + ': ' + str(value)
print(text)
logger.error(_("Uncaught exception: ") + text)
QtWidgets.QMessageBox.critical(None, _('Uncaught Exception'), text)
class DialogInformation(QtWidgets.QDialog):
"""
Dialog to display about information from html and text files for PyQDA development,
version and license.
The html is coded below because it avoids potential data file import errors with pyinstaller.
Called from:
qualcoder.MainWindow.about
view_graph_original.ViewGraphOriginal.list_graph.TextGraphicsItem
view_graph_original.ViewGraphOriginal.circular_graph.TextGraphicsItem
"""
title = ""
text = ""
def __init__(self, app, title, html="", parent=None):
"""Display information text in dialog.
If no html is given, fill with About html. """
sys.excepthook = exception_handler
QtWidgets.QDialog.__init__(self)
self.ui = Ui_Dialog_information()
self.ui.setupUi(self)
self.setWindowFlags(self.windowFlags() & ~QtCore.Qt.WindowContextHelpButtonHint)
font = 'font: ' + str(app.settings['fontsize']) + 'pt '
font += '"' + app.settings['font'] + '";'
self.setStyleSheet(font)
self.setWindowTitle(title)
if html == "":
self.setHtml(a)
else:
self.setHtml(html)
def setHtml(self, html):
"""This method is used to populate the textEdit.
Usually called from a View_graph TextGraphicsItem via a context menu. """
self.text = html
self.ui.textEdit.setHtml(self.text)
def accepted(self):
""" Accepted button overridden method """
self.information = self.ui.textEdit.toPlainText()
self.ui.Dialog_information.accept()
a = '<h1 class="western">About QualCoder</h1>\
<h2 class="western">Version:</h2>\
<p>QualCoder 1.9 2020 March 11</p>\
<p>Depends on python 3.x, pyqt5 lxml Pillow ebooklib ply chardet pdfminer.six openpyxl</p>\
<p>VLC should also be installed.</p>\
<p>Tested on: Linux Mint 18.04, Ubuntu 19.04, Lubuntu 18.04, mostly tested on Windows 10, partly tested on Mac OS.</p>\
<p></p>\
<h2 class="western">Acknowledgements</h2>\
<p>Ronggui Huang and Zhang Gehao for creating RQDA, which inspired this software.</p>\
<p>Mike MacCana for the source code for the docx module.</p>\
<p>User: bit4 on stackoverflow who presented the source code to convert html to text.</p>\
<p>ebooklib: Aleksandar Erkalović (<a href="https://github.com/aerkalov">https://github.com/aerkalov</a>)</p>\
<p>The VideoLAN team for the bindings to VLC</p>\
<p>To various members on github for supporting this project.</p>\
<h2 class="western">Other details</h2\
<p>The qda data folder contains folders for imported documents, \
images, audio and video. It also contains the sqlite database, named data.qda, to store coding data.</p>\
<p>QualCoder creates a .qualcoder folder inside your home directory. \
This contains QualCoder.log, config.ini (for settings) and \
recent_project.txt. The config file contains the name of the current coder, \
default working directory and selected font.</p>\
<p>QualCoder is written in python 3 using Qt5 for the graphical interface.</p>\
<p>The REFI-QDA Project import and export are experimental and should not be relied upon. </p>\
<h2 class="western">License</h2>\
<p>MIT License</p>\
<p>Copyright (c) 2020 Colin Curtain</p>\
<p>Permission is hereby granted, free of charge, to any person<br />\
obtaining a copy of this software and associated documentation files<br />\
(the "Software"), to deal in the Software without<br />\
restriction, including without limitation the rights to use, copy,<br />\
modify, merge, publish, distribute, sublicense, and/or sell copies of<br />\
the Software, and to permit persons to whom the Software is furnished<br />\
to do so, subject to the following conditions:</p>\
<p>The above copyright notice and this permission notice shall be <br />\
included in all copies or substantial portions of the Software.</p>\
<p>THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF<br />\
ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE<br />\
WARRANTIES OF MERCHANTABILITY,</p>\
<p>FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT<br />\
SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,<br />\
DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR<br />\
OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR<br />\
THE USE OR OTHER DEALINGS IN THE SOFTWARE.</p>'
if __name__ == "__main__":
app = QtWidgets.QApplication(sys.argv)
ui = DialogInformation(None, "a title", "")
ui.show()
    sys.exit(app.exec_())
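
# Usage sketch from a caller (hedged: `app` must be the QualCoder application
# object carrying settings['font'] and settings['fontsize']; the html content
# below is illustrative):
#
#   dialog = DialogInformation(app, "Codebook", "<h1>Exported codebook</h1>")
#   dialog.setHtml("<p>Content replaced via setHtml()</p>")
#   dialog.exec_()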
# coding: utf-8
"""
LUSID API
FINBOURNE Technology # noqa: E501
The version of the OpenAPI document: 0.11.2342
Contact: info@finbourne.com
Generated by: https://openapi-generator.tech
"""
import pprint
import re # noqa: F401
import six
class ResourceListOfPortfolio(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
required_map (dict): The key is attribute name
and the value is whether it is 'required' or 'optional'.
"""
openapi_types = {
'values': 'list[Portfolio]',
'href': 'str',
'links': 'list[Link]'
}
attribute_map = {
'values': 'values',
'href': 'href',
'links': 'links'
}
required_map = {
'values': 'required',
'href': 'optional',
'links': 'optional'
}
def __init__(self, values=None, href=None, links=None): # noqa: E501
"""
ResourceListOfPortfolio - a model defined in OpenAPI
:param values: (required)
:type values: list[lusid.Portfolio]
:param href:
:type href: str
:param links:
:type links: list[lusid.Link]
""" # noqa: E501
self._values = None
self._href = None
self._links = None
self.discriminator = None
self.values = values
self.href = href
self.links = links
@property
def values(self):
"""Gets the values of this ResourceListOfPortfolio. # noqa: E501
:return: The values of this ResourceListOfPortfolio. # noqa: E501
:rtype: list[Portfolio]
"""
return self._values
@values.setter
def values(self, values):
"""Sets the values of this ResourceListOfPortfolio.
:param values: The values of this ResourceListOfPortfolio. # noqa: E501
:type: list[Portfolio]
"""
if values is None:
raise ValueError("Invalid value for `values`, must not be `None`") # noqa: E501
self._values = values
@property
def href(self):
"""Gets the href of this ResourceListOfPortfolio. # noqa: E501
:return: The href of this ResourceListOfPortfolio. # noqa: E501
:rtype: str
"""
return self._href
@href.setter
def href(self, href):
"""Sets the href of this ResourceListOfPortfolio.
:param href: The href of this ResourceListOfPortfolio. # noqa: E501
:type: str
"""
self._href = href
@property
def links(self):
"""Gets the links of this ResourceListOfPortfolio. # noqa: E501
:return: The links of this ResourceListOfPortfolio. # noqa: E501
:rtype: list[Link]
"""
return self._links
@links.setter
def links(self, links):
"""Sets the links of this ResourceListOfPortfolio.
:param links: The links of this ResourceListOfPortfolio. # noqa: E501
:type: list[Link]
"""
self._links = links
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, ResourceListOfPortfolio):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
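
if __name__ == "__main__":
    # Minimal sketch (assumption: an empty `values` list is used purely to
    # illustrate construction and serialisation; real responses carry
    # lusid.Portfolio instances).
    example = ResourceListOfPortfolio(values=[], href="https://example.com/api/portfolios")
    print(example.to_dict())  # {'values': [], 'href': '...', 'links': None}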
# (C) British Crown Copyright 2018, Met Office
#
# This file is part of cartopy.
#
# cartopy is free software: you can redistribute it and/or modify it under
# the terms of the GNU Lesser General Public License as published by the
# Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# cartopy is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with cartopy. If not, see <https://www.gnu.org/licenses/>.
"""
Tests for the UTM coordinate system.
"""
from __future__ import (absolute_import, division, print_function)
import numpy as np
from numpy.testing import assert_almost_equal
import pytest
import cartopy.crs as ccrs
def check_proj4_params(crs, other_args):
expected = other_args | {'proj=utm', 'no_defs', 'units=m'}
pro4_params = set(crs.proj4_init.lstrip('+').split(' +'))
assert expected == pro4_params
@pytest.mark.parametrize('south', [False, True])
def test_default(south):
zone = 1 # Limits are fixed, so don't bother checking other zones.
utm = ccrs.UTM(zone, southern_hemisphere=south)
other_args = {'ellps=WGS84', 'zone={}'.format(zone)}
if south:
other_args |= {'south'}
check_proj4_params(utm, other_args)
assert_almost_equal(np.array(utm.x_limits),
[-250000, 1250000])
assert_almost_equal(np.array(utm.y_limits),
[-10000000, 25000000])
def test_ellipsoid_transform():
# USGS Professional Paper 1395, pp 269 - 271
globe = ccrs.Globe(ellipse='clrk66')
utm = ccrs.UTM(zone=18, globe=globe)
geodetic = utm.as_geodetic()
other_args = {'ellps=clrk66', 'zone=18'}
check_proj4_params(utm, other_args)
assert_almost_equal(np.array(utm.x_limits),
[-250000, 1250000])
assert_almost_equal(np.array(utm.y_limits),
[-10000000, 25000000])
result = utm.transform_point(-73.5, 40.5, geodetic)
assert_almost_equal(result, np.array([127106.5 + 500000, 4484124.4]),
decimal=1)
inverse_result = geodetic.transform_point(result[0], result[1], utm)
assert_almost_equal(inverse_result, [-73.5, 40.5])
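
if __name__ == '__main__':
    # Standalone sketch (not part of the test suite): pick the UTM zone for a
    # longitude using the standard 6-degree convention (an assumption, not
    # something this module defines) and project a point with it.
    lon, lat = -73.5, 40.5
    zone = int((lon + 180) // 6) + 1  # -> zone 18 for this longitude
    utm = ccrs.UTM(zone)
    x, y = utm.transform_point(lon, lat, utm.as_geodetic())
    print(zone, x, y)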
import numpy as np
import pyqtgraph as pg
from datetime import datetime, timedelta
from vnpy.trader.constant import Interval, Direction, Offset
from vnpy.trader.engine import MainEngine
from vnpy.trader.ui import QtCore, QtWidgets, QtGui
from vnpy.trader.ui.widget import BaseMonitor, BaseCell, DirectionCell, EnumCell
from vnpy.trader.ui.editor import CodeEditor
from vnpy.event import Event, EventEngine
from vnpy.chart import ChartWidget, CandleItem, VolumeItem
from vnpy.trader.utility import load_json, save_json
from ..engine import (
APP_NAME,
EVENT_BACKTESTER_LOG,
EVENT_BACKTESTER_BACKTESTING_FINISHED,
EVENT_BACKTESTER_OPTIMIZATION_FINISHED,
OptimizationSetting
)
class BacktesterManager(QtWidgets.QWidget):
""""""
setting_filename = "cta_backtester_setting.json"
signal_log = QtCore.pyqtSignal(Event)
signal_backtesting_finished = QtCore.pyqtSignal(Event)
signal_optimization_finished = QtCore.pyqtSignal(Event)
def __init__(self, main_engine: MainEngine, event_engine: EventEngine):
""""""
super().__init__()
self.main_engine = main_engine
self.event_engine = event_engine
self.backtester_engine = main_engine.get_engine(APP_NAME)
self.class_names = []
self.settings = {}
self.target_display = ""
self.init_ui()
self.register_event()
self.backtester_engine.init_engine()
self.init_strategy_settings()
def init_strategy_settings(self):
""""""
self.class_names = self.backtester_engine.get_strategy_class_names()
for class_name in self.class_names:
setting = self.backtester_engine.get_default_setting(class_name)
self.settings[class_name] = setting
self.class_combo.addItems(self.class_names)
def init_ui(self):
""""""
self.setWindowTitle("CTA回测")
# Setting Part
self.class_combo = QtWidgets.QComboBox()
self.symbol_line = QtWidgets.QLineEdit("IF88.CFFEX")
self.interval_combo = QtWidgets.QComboBox()
        for interval in Interval:
            self.interval_combo.addItem(interval.value)
end_dt = datetime.now()
start_dt = end_dt - timedelta(days=3 * 365)
self.start_date_edit = QtWidgets.QDateEdit(
QtCore.QDate(
start_dt.year,
start_dt.month,
start_dt.day
)
)
self.end_date_edit = QtWidgets.QDateEdit(
QtCore.QDate.currentDate()
)
self.rate_line = QtWidgets.QLineEdit("0.000025")
self.slippage_line = QtWidgets.QLineEdit("0.2")
self.size_line = QtWidgets.QLineEdit("300")
self.pricetick_line = QtWidgets.QLineEdit("0.2")
self.capital_line = QtWidgets.QLineEdit("1000000")
self.inverse_combo = QtWidgets.QComboBox()
self.inverse_combo.addItems(["正向", "反向"])
backtesting_button = QtWidgets.QPushButton("开始回测")
backtesting_button.clicked.connect(self.start_backtesting)
optimization_button = QtWidgets.QPushButton("参数优化")
optimization_button.clicked.connect(self.start_optimization)
self.result_button = QtWidgets.QPushButton("优化结果")
self.result_button.clicked.connect(self.show_optimization_result)
self.result_button.setEnabled(False)
downloading_button = QtWidgets.QPushButton("下载数据")
downloading_button.clicked.connect(self.start_downloading)
self.order_button = QtWidgets.QPushButton("委托记录")
self.order_button.clicked.connect(self.show_backtesting_orders)
self.order_button.setEnabled(False)
self.trade_button = QtWidgets.QPushButton("成交记录")
self.trade_button.clicked.connect(self.show_backtesting_trades)
self.trade_button.setEnabled(False)
self.daily_button = QtWidgets.QPushButton("每日盈亏")
self.daily_button.clicked.connect(self.show_daily_results)
self.daily_button.setEnabled(False)
self.candle_button = QtWidgets.QPushButton("K线图表")
self.candle_button.clicked.connect(self.show_candle_chart)
self.candle_button.setEnabled(False)
edit_button = QtWidgets.QPushButton("代码编辑")
edit_button.clicked.connect(self.edit_strategy_code)
reload_button = QtWidgets.QPushButton("策略重载")
reload_button.clicked.connect(self.reload_strategy_class)
for button in [
backtesting_button,
optimization_button,
downloading_button,
self.result_button,
self.order_button,
self.trade_button,
self.daily_button,
self.candle_button,
edit_button,
reload_button
]:
button.setFixedHeight(button.sizeHint().height() * 2)
form = QtWidgets.QFormLayout()
form.addRow("交易策略", self.class_combo)
form.addRow("本地代码", self.symbol_line)
form.addRow("K线周期", self.interval_combo)
form.addRow("开始日期", self.start_date_edit)
form.addRow("结束日期", self.end_date_edit)
form.addRow("手续费率", self.rate_line)
form.addRow("交易滑点", self.slippage_line)
form.addRow("合约乘数", self.size_line)
form.addRow("价格跳动", self.pricetick_line)
form.addRow("回测资金", self.capital_line)
form.addRow("合约模式", self.inverse_combo)
result_grid = QtWidgets.QGridLayout()
result_grid.addWidget(self.trade_button, 0, 0)
result_grid.addWidget(self.order_button, 0, 1)
result_grid.addWidget(self.daily_button, 1, 0)
result_grid.addWidget(self.candle_button, 1, 1)
left_vbox = QtWidgets.QVBoxLayout()
left_vbox.addLayout(form)
left_vbox.addWidget(backtesting_button)
left_vbox.addWidget(downloading_button)
left_vbox.addStretch()
left_vbox.addLayout(result_grid)
left_vbox.addStretch()
left_vbox.addWidget(optimization_button)
left_vbox.addWidget(self.result_button)
left_vbox.addStretch()
left_vbox.addWidget(edit_button)
left_vbox.addWidget(reload_button)
# Result part
self.statistics_monitor = StatisticsMonitor()
self.log_monitor = QtWidgets.QTextEdit()
self.log_monitor.setMaximumHeight(400)
self.chart = BacktesterChart()
self.chart.setMinimumWidth(1000)
self.trade_dialog = BacktestingResultDialog(
self.main_engine,
self.event_engine,
"回测成交记录",
BacktestingTradeMonitor
)
self.order_dialog = BacktestingResultDialog(
self.main_engine,
self.event_engine,
"回测委托记录",
BacktestingOrderMonitor
)
self.daily_dialog = BacktestingResultDialog(
self.main_engine,
self.event_engine,
"回测每日盈亏",
DailyResultMonitor
)
# Candle Chart
self.candle_dialog = CandleChartDialog()
# Layout
vbox = QtWidgets.QVBoxLayout()
vbox.addWidget(self.statistics_monitor)
vbox.addWidget(self.log_monitor)
hbox = QtWidgets.QHBoxLayout()
hbox.addLayout(left_vbox)
hbox.addLayout(vbox)
hbox.addWidget(self.chart)
self.setLayout(hbox)
# Code Editor
self.editor = CodeEditor(self.main_engine, self.event_engine)
# Load setting
setting = load_json(self.setting_filename)
if not setting:
return
self.class_combo.setCurrentIndex(
self.class_combo.findText(setting["class_name"])
)
self.symbol_line.setText(setting["vt_symbol"])
self.interval_combo.setCurrentIndex(
self.interval_combo.findText(setting["interval"])
)
self.rate_line.setText(str(setting["rate"]))
self.slippage_line.setText(str(setting["slippage"]))
self.size_line.setText(str(setting["size"]))
self.pricetick_line.setText(str(setting["pricetick"]))
self.capital_line.setText(str(setting["capital"]))
if not setting["inverse"]:
self.inverse_combo.setCurrentIndex(0)
else:
self.inverse_combo.setCurrentIndex(1)
def register_event(self):
""""""
self.signal_log.connect(self.process_log_event)
self.signal_backtesting_finished.connect(
self.process_backtesting_finished_event)
self.signal_optimization_finished.connect(
self.process_optimization_finished_event)
self.event_engine.register(EVENT_BACKTESTER_LOG, self.signal_log.emit)
self.event_engine.register(
EVENT_BACKTESTER_BACKTESTING_FINISHED, self.signal_backtesting_finished.emit)
self.event_engine.register(
EVENT_BACKTESTER_OPTIMIZATION_FINISHED, self.signal_optimization_finished.emit)
def process_log_event(self, event: Event):
""""""
msg = event.data
self.write_log(msg)
def write_log(self, msg):
""""""
timestamp = datetime.now().strftime("%H:%M:%S")
msg = f"{timestamp}\t{msg}"
self.log_monitor.append(msg)
def process_backtesting_finished_event(self, event: Event):
""""""
statistics = self.backtester_engine.get_result_statistics()
self.statistics_monitor.set_data(statistics)
df = self.backtester_engine.get_result_df()
self.chart.set_data(df)
self.trade_button.setEnabled(True)
self.order_button.setEnabled(True)
self.daily_button.setEnabled(True)
self.candle_button.setEnabled(True)
def process_optimization_finished_event(self, event: Event):
""""""
self.write_log("请点击[优化结果]按钮查看")
self.result_button.setEnabled(True)
def start_backtesting(self):
""""""
class_name = self.class_combo.currentText()
vt_symbol = self.symbol_line.text()
interval = self.interval_combo.currentText()
start = self.start_date_edit.date().toPyDate()
end = self.end_date_edit.date().toPyDate()
rate = float(self.rate_line.text())
slippage = float(self.slippage_line.text())
size = float(self.size_line.text())
pricetick = float(self.pricetick_line.text())
capital = float(self.capital_line.text())
if self.inverse_combo.currentText() == "正向":
inverse = False
else:
inverse = True
# Save backtesting parameters
backtesting_setting = {
"class_name": class_name,
"vt_symbol": vt_symbol,
"interval": interval,
"rate": rate,
"slippage": slippage,
"size": size,
"pricetick": pricetick,
"capital": capital,
"inverse": inverse,
}
save_json(self.setting_filename, backtesting_setting)
# Get strategy setting
old_setting = self.settings[class_name]
dialog = BacktestingSettingEditor(class_name, old_setting)
i = dialog.exec()
if i != dialog.Accepted:
return
new_setting = dialog.get_setting()
self.settings[class_name] = new_setting
result = self.backtester_engine.start_backtesting(
class_name,
vt_symbol,
interval,
start,
end,
rate,
slippage,
size,
pricetick,
capital,
inverse,
new_setting
)
if result:
self.statistics_monitor.clear_data()
self.chart.clear_data()
self.trade_button.setEnabled(False)
self.order_button.setEnabled(False)
self.daily_button.setEnabled(False)
self.candle_button.setEnabled(False)
self.trade_dialog.clear_data()
self.order_dialog.clear_data()
self.daily_dialog.clear_data()
self.candle_dialog.clear_data()
def start_optimization(self):
""""""
class_name = self.class_combo.currentText()
vt_symbol = self.symbol_line.text()
interval = self.interval_combo.currentText()
start = self.start_date_edit.date().toPyDate()
end = self.end_date_edit.date().toPyDate()
rate = float(self.rate_line.text())
slippage = float(self.slippage_line.text())
size = float(self.size_line.text())
pricetick = float(self.pricetick_line.text())
capital = float(self.capital_line.text())
if self.inverse_combo.currentText() == "正向":
inverse = False
else:
inverse = True
parameters = self.settings[class_name]
dialog = OptimizationSettingEditor(class_name, parameters)
i = dialog.exec()
if i != dialog.Accepted:
return
optimization_setting, use_ga = dialog.get_setting()
self.target_display = dialog.target_display
self.backtester_engine.start_optimization(
class_name,
vt_symbol,
interval,
start,
end,
rate,
slippage,
size,
pricetick,
capital,
inverse,
optimization_setting,
use_ga
)
self.result_button.setEnabled(False)
def start_downloading(self):
""""""
vt_symbol = self.symbol_line.text()
interval = self.interval_combo.currentText()
start_date = self.start_date_edit.date()
end_date = self.end_date_edit.date()
start = datetime(start_date.year(), start_date.month(), start_date.day())
end = datetime(end_date.year(), end_date.month(), end_date.day(), 23, 59, 59)
self.backtester_engine.start_downloading(
vt_symbol,
interval,
start,
end
)
def show_optimization_result(self):
""""""
result_values = self.backtester_engine.get_result_values()
dialog = OptimizationResultMonitor(
result_values,
self.target_display
)
dialog.exec_()
def show_backtesting_trades(self):
""""""
if not self.trade_dialog.is_updated():
trades = self.backtester_engine.get_all_trades()
self.trade_dialog.update_data(trades)
self.trade_dialog.exec_()
def show_backtesting_orders(self):
""""""
if not self.order_dialog.is_updated():
orders = self.backtester_engine.get_all_orders()
self.order_dialog.update_data(orders)
self.order_dialog.exec_()
def show_daily_results(self):
""""""
if not self.daily_dialog.is_updated():
results = self.backtester_engine.get_all_daily_results()
self.daily_dialog.update_data(results)
self.daily_dialog.exec_()
def show_candle_chart(self):
""""""
if not self.candle_dialog.is_updated():
history = self.backtester_engine.get_history_data()
self.candle_dialog.update_history(history)
trades = self.backtester_engine.get_all_trades()
self.candle_dialog.update_trades(trades)
self.candle_dialog.exec_()
def edit_strategy_code(self):
""""""
class_name = self.class_combo.currentText()
file_path = self.backtester_engine.get_strategy_class_file(class_name)
self.editor.open_editor(file_path)
self.editor.show()
def reload_strategy_class(self):
""""""
self.backtester_engine.reload_strategy_class()
self.class_combo.clear()
self.init_strategy_settings()
def show(self):
""""""
self.showMaximized()
class StatisticsMonitor(QtWidgets.QTableWidget):
""""""
KEY_NAME_MAP = {
"start_date": "首个交易日",
"end_date": "最后交易日",
"total_days": "总交易日",
"profit_days": "盈利交易日",
"loss_days": "亏损交易日",
"capital": "起始资金",
"end_balance": "结束资金",
"total_return": "总收益率",
"annual_return": "年化收益",
"max_drawdown": "最大回撤",
"max_ddpercent": "百分比最大回撤",
"total_net_pnl": "总盈亏",
"total_commission": "总手续费",
"total_slippage": "总滑点",
"total_turnover": "总成交额",
"total_trade_count": "总成交笔数",
"daily_net_pnl": "日均盈亏",
"daily_commission": "日均手续费",
"daily_slippage": "日均滑点",
"daily_turnover": "日均成交额",
"daily_trade_count": "日均成交笔数",
"daily_return": "日均收益率",
"return_std": "收益标准差",
"sharpe_ratio": "夏普比率",
"return_drawdown_ratio": "收益回撤比"
}
def __init__(self):
""""""
super().__init__()
self.cells = {}
self.init_ui()
def init_ui(self):
""""""
self.setRowCount(len(self.KEY_NAME_MAP))
self.setVerticalHeaderLabels(list(self.KEY_NAME_MAP.values()))
self.setColumnCount(1)
self.horizontalHeader().setVisible(False)
self.horizontalHeader().setSectionResizeMode(
QtWidgets.QHeaderView.Stretch
)
self.setEditTriggers(self.NoEditTriggers)
for row, key in enumerate(self.KEY_NAME_MAP.keys()):
cell = QtWidgets.QTableWidgetItem()
self.setItem(row, 0, cell)
self.cells[key] = cell
def clear_data(self):
""""""
for cell in self.cells.values():
cell.setText("")
def set_data(self, data: dict):
""""""
data["capital"] = f"{data['capital']:,.2f}"
data["end_balance"] = f"{data['end_balance']:,.2f}"
data["total_return"] = f"{data['total_return']:,.2f}%"
data["annual_return"] = f"{data['annual_return']:,.2f}%"
data["max_drawdown"] = f"{data['max_drawdown']:,.2f}"
data["max_ddpercent"] = f"{data['max_ddpercent']:,.2f}%"
data["total_net_pnl"] = f"{data['total_net_pnl']:,.2f}"
data["total_commission"] = f"{data['total_commission']:,.2f}"
data["total_slippage"] = f"{data['total_slippage']:,.2f}"
data["total_turnover"] = f"{data['total_turnover']:,.2f}"
data["daily_net_pnl"] = f"{data['daily_net_pnl']:,.2f}"
data["daily_commission"] = f"{data['daily_commission']:,.2f}"
data["daily_slippage"] = f"{data['daily_slippage']:,.2f}"
data["daily_turnover"] = f"{data['daily_turnover']:,.2f}"
data["daily_return"] = f"{data['daily_return']:,.2f}%"
data["return_std"] = f"{data['return_std']:,.2f}%"
data["sharpe_ratio"] = f"{data['sharpe_ratio']:,.2f}"
data["return_drawdown_ratio"] = f"{data['return_drawdown_ratio']:,.2f}"
for key, cell in self.cells.items():
value = data.get(key, "")
cell.setText(str(value))
class BacktestingSettingEditor(QtWidgets.QDialog):
"""
For creating new strategy and editing strategy parameters.
"""
def __init__(
self, class_name: str, parameters: dict
):
""""""
super(BacktestingSettingEditor, self).__init__()
self.class_name = class_name
self.parameters = parameters
self.edits = {}
self.init_ui()
def init_ui(self):
""""""
form = QtWidgets.QFormLayout()
        # Add vt_symbol and name edits when adding a new strategy
self.setWindowTitle(f"策略参数配置:{self.class_name}")
button_text = "确定"
parameters = self.parameters
for name, value in parameters.items():
type_ = type(value)
edit = QtWidgets.QLineEdit(str(value))
if type_ is int:
validator = QtGui.QIntValidator()
edit.setValidator(validator)
elif type_ is float:
validator = QtGui.QDoubleValidator()
edit.setValidator(validator)
form.addRow(f"{name} {type_}", edit)
self.edits[name] = (edit, type_)
button = QtWidgets.QPushButton(button_text)
button.clicked.connect(self.accept)
form.addRow(button)
self.setLayout(form)
def get_setting(self):
""""""
setting = {}
for name, tp in self.edits.items():
edit, type_ = tp
value_text = edit.text()
if type_ == bool:
if value_text == "True":
value = True
else:
value = False
else:
value = type_(value_text)
setting[name] = value
return setting
class BacktesterChart(pg.GraphicsWindow):
""""""
def __init__(self):
""""""
super().__init__(title="Backtester Chart")
self.dates = {}
self.init_ui()
def init_ui(self):
""""""
pg.setConfigOptions(antialias=True)
# Create plot widgets
self.balance_plot = self.addPlot(
title="账户净值",
axisItems={"bottom": DateAxis(self.dates, orientation="bottom")}
)
self.nextRow()
self.drawdown_plot = self.addPlot(
title="净值回撤",
axisItems={"bottom": DateAxis(self.dates, orientation="bottom")}
)
self.nextRow()
self.pnl_plot = self.addPlot(
title="每日盈亏",
axisItems={"bottom": DateAxis(self.dates, orientation="bottom")}
)
self.nextRow()
self.distribution_plot = self.addPlot(title="盈亏分布")
# Add curves and bars on plot widgets
self.balance_curve = self.balance_plot.plot(
pen=pg.mkPen("#ffc107", width=3)
)
dd_color = "#303f9f"
self.drawdown_curve = self.drawdown_plot.plot(
fillLevel=-0.3, brush=dd_color, pen=dd_color
)
profit_color = 'r'
loss_color = 'g'
self.profit_pnl_bar = pg.BarGraphItem(
x=[], height=[], width=0.3, brush=profit_color, pen=profit_color
)
self.loss_pnl_bar = pg.BarGraphItem(
x=[], height=[], width=0.3, brush=loss_color, pen=loss_color
)
self.pnl_plot.addItem(self.profit_pnl_bar)
self.pnl_plot.addItem(self.loss_pnl_bar)
distribution_color = "#6d4c41"
self.distribution_curve = self.distribution_plot.plot(
fillLevel=-0.3, brush=distribution_color, pen=distribution_color
)
def clear_data(self):
""""""
self.balance_curve.setData([], [])
self.drawdown_curve.setData([], [])
self.profit_pnl_bar.setOpts(x=[], height=[])
self.loss_pnl_bar.setOpts(x=[], height=[])
self.distribution_curve.setData([], [])
def set_data(self, df):
""""""
if df is None:
return
count = len(df)
self.dates.clear()
for n, date in enumerate(df.index):
self.dates[n] = date
# Set data for curve of balance and drawdown
self.balance_curve.setData(df["balance"])
self.drawdown_curve.setData(df["drawdown"])
# Set data for daily pnl bar
profit_pnl_x = []
profit_pnl_height = []
loss_pnl_x = []
loss_pnl_height = []
for count, pnl in enumerate(df["net_pnl"]):
if pnl >= 0:
profit_pnl_height.append(pnl)
profit_pnl_x.append(count)
else:
loss_pnl_height.append(pnl)
loss_pnl_x.append(count)
self.profit_pnl_bar.setOpts(x=profit_pnl_x, height=profit_pnl_height)
self.loss_pnl_bar.setOpts(x=loss_pnl_x, height=loss_pnl_height)
# Set data for pnl distribution
hist, x = np.histogram(df["net_pnl"], bins="auto")
x = x[:-1]
self.distribution_curve.setData(x, hist)
class DateAxis(pg.AxisItem):
"""Axis for showing date data"""
def __init__(self, dates: dict, *args, **kwargs):
""""""
super().__init__(*args, **kwargs)
self.dates = dates
def tickStrings(self, values, scale, spacing):
""""""
strings = []
for v in values:
dt = self.dates.get(v, "")
strings.append(str(dt))
return strings
class OptimizationSettingEditor(QtWidgets.QDialog):
"""
For setting up parameters for optimization.
"""
DISPLAY_NAME_MAP = {
"总收益率": "total_return",
"夏普比率": "sharpe_ratio",
"收益回撤比": "return_drawdown_ratio",
"日均盈亏": "daily_net_pnl"
}
def __init__(
self, class_name: str, parameters: dict
):
""""""
super().__init__()
self.class_name = class_name
self.parameters = parameters
self.edits = {}
self.optimization_setting = None
self.use_ga = False
self.init_ui()
def init_ui(self):
""""""
QLabel = QtWidgets.QLabel
self.target_combo = QtWidgets.QComboBox()
self.target_combo.addItems(list(self.DISPLAY_NAME_MAP.keys()))
grid = QtWidgets.QGridLayout()
grid.addWidget(QLabel("目标"), 0, 0)
grid.addWidget(self.target_combo, 0, 1, 1, 3)
grid.addWidget(QLabel("参数"), 1, 0)
grid.addWidget(QLabel("开始"), 1, 1)
grid.addWidget(QLabel("步进"), 1, 2)
grid.addWidget(QLabel("结束"), 1, 3)
        # Add vt_symbol and name edits when adding a new strategy
self.setWindowTitle(f"优化参数配置:{self.class_name}")
validator = QtGui.QDoubleValidator()
row = 2
for name, value in self.parameters.items():
type_ = type(value)
if type_ not in [int, float]:
continue
start_edit = QtWidgets.QLineEdit(str(value))
step_edit = QtWidgets.QLineEdit(str(1))
end_edit = QtWidgets.QLineEdit(str(value))
for edit in [start_edit, step_edit, end_edit]:
edit.setValidator(validator)
grid.addWidget(QLabel(name), row, 0)
grid.addWidget(start_edit, row, 1)
grid.addWidget(step_edit, row, 2)
grid.addWidget(end_edit, row, 3)
self.edits[name] = {
"type": type_,
"start": start_edit,
"step": step_edit,
"end": end_edit
}
row += 1
parallel_button = QtWidgets.QPushButton("多进程优化")
parallel_button.clicked.connect(self.generate_parallel_setting)
grid.addWidget(parallel_button, row, 0, 1, 4)
row += 1
ga_button = QtWidgets.QPushButton("遗传算法优化")
ga_button.clicked.connect(self.generate_ga_setting)
grid.addWidget(ga_button, row, 0, 1, 4)
self.setLayout(grid)
def generate_ga_setting(self):
""""""
self.use_ga = True
self.generate_setting()
def generate_parallel_setting(self):
""""""
self.use_ga = False
self.generate_setting()
def generate_setting(self):
""""""
self.optimization_setting = OptimizationSetting()
self.target_display = self.target_combo.currentText()
target_name = self.DISPLAY_NAME_MAP[self.target_display]
self.optimization_setting.set_target(target_name)
for name, d in self.edits.items():
type_ = d["type"]
start_value = type_(d["start"].text())
step_value = type_(d["step"].text())
end_value = type_(d["end"].text())
if start_value == end_value:
self.optimization_setting.add_parameter(name, start_value)
else:
self.optimization_setting.add_parameter(
name,
start_value,
end_value,
step_value
)
self.accept()
def get_setting(self):
""""""
return self.optimization_setting, self.use_ga
class OptimizationResultMonitor(QtWidgets.QDialog):
"""
For viewing optimization result.
"""
def __init__(
self, result_values: list, target_display: str
):
""""""
super().__init__()
self.result_values = result_values
self.target_display = target_display
self.init_ui()
def init_ui(self):
""""""
self.setWindowTitle("参数优化结果")
self.resize(1100, 500)
table = QtWidgets.QTableWidget()
table.setColumnCount(2)
table.setRowCount(len(self.result_values))
table.setHorizontalHeaderLabels(["参数", self.target_display])
table.setEditTriggers(table.NoEditTriggers)
table.verticalHeader().setVisible(False)
table.horizontalHeader().setSectionResizeMode(
0, QtWidgets.QHeaderView.ResizeToContents
)
table.horizontalHeader().setSectionResizeMode(
1, QtWidgets.QHeaderView.Stretch
)
for n, tp in enumerate(self.result_values):
setting, target_value, _ = tp
setting_cell = QtWidgets.QTableWidgetItem(str(setting))
target_cell = QtWidgets.QTableWidgetItem(str(target_value))
setting_cell.setTextAlignment(QtCore.Qt.AlignCenter)
target_cell.setTextAlignment(QtCore.Qt.AlignCenter)
table.setItem(n, 0, setting_cell)
table.setItem(n, 1, target_cell)
vbox = QtWidgets.QVBoxLayout()
vbox.addWidget(table)
self.setLayout(vbox)
class BacktestingTradeMonitor(BaseMonitor):
"""
Monitor for backtesting trade data.
"""
headers = {
"tradeid": {"display": "成交号 ", "cell": BaseCell, "update": False},
"orderid": {"display": "委托号", "cell": BaseCell, "update": False},
"symbol": {"display": "代码", "cell": BaseCell, "update": False},
"exchange": {"display": "交易所", "cell": EnumCell, "update": False},
"direction": {"display": "方向", "cell": DirectionCell, "update": False},
"offset": {"display": "开平", "cell": EnumCell, "update": False},
"price": {"display": "价格", "cell": BaseCell, "update": False},
"volume": {"display": "数量", "cell": BaseCell, "update": False},
"datetime": {"display": "时间", "cell": BaseCell, "update": False},
"gateway_name": {"display": "接口", "cell": BaseCell, "update": False},
}
class BacktestingOrderMonitor(BaseMonitor):
"""
Monitor for backtesting order data.
"""
headers = {
"orderid": {"display": "委托号", "cell": BaseCell, "update": False},
"symbol": {"display": "代码", "cell": BaseCell, "update": False},
"exchange": {"display": "交易所", "cell": EnumCell, "update": False},
"type": {"display": "类型", "cell": EnumCell, "update": False},
"direction": {"display": "方向", "cell": DirectionCell, "update": False},
"offset": {"display": "开平", "cell": EnumCell, "update": False},
"price": {"display": "价格", "cell": BaseCell, "update": False},
"volume": {"display": "总数量", "cell": BaseCell, "update": False},
"traded": {"display": "已成交", "cell": BaseCell, "update": False},
"status": {"display": "状态", "cell": EnumCell, "update": False},
"datetime": {"display": "时间", "cell": BaseCell, "update": False},
"gateway_name": {"display": "接口", "cell": BaseCell, "update": False},
}
class DailyResultMonitor(BaseMonitor):
"""
Monitor for backtesting daily result.
"""
headers = {
"date": {"display": "日期", "cell": BaseCell, "update": False},
"trade_count": {"display": "成交笔数", "cell": BaseCell, "update": False},
"start_pos": {"display": "开盘持仓", "cell": BaseCell, "update": False},
"end_pos": {"display": "收盘持仓", "cell": BaseCell, "update": False},
"turnover": {"display": "成交额", "cell": BaseCell, "update": False},
"commission": {"display": "手续费", "cell": BaseCell, "update": False},
"slippage": {"display": "滑点", "cell": BaseCell, "update": False},
"trading_pnl": {"display": "交易盈亏", "cell": BaseCell, "update": False},
"holding_pnl": {"display": "持仓盈亏", "cell": BaseCell, "update": False},
"total_pnl": {"display": "总盈亏", "cell": BaseCell, "update": False},
"net_pnl": {"display": "净盈亏", "cell": BaseCell, "update": False},
}
class BacktestingResultDialog(QtWidgets.QDialog):
"""
"""
def __init__(
self,
main_engine: MainEngine,
event_engine: EventEngine,
title: str,
table_class: QtWidgets.QTableWidget
):
""""""
super().__init__()
self.main_engine = main_engine
self.event_engine = event_engine
self.title = title
self.table_class = table_class
self.updated = False
self.init_ui()
def init_ui(self):
""""""
self.setWindowTitle(self.title)
self.resize(1100, 600)
self.table = self.table_class(self.main_engine, self.event_engine)
vbox = QtWidgets.QVBoxLayout()
vbox.addWidget(self.table)
self.setLayout(vbox)
def clear_data(self):
""""""
self.updated = False
self.table.setRowCount(0)
def update_data(self, data: list):
""""""
self.updated = True
data.reverse()
for obj in data:
self.table.insert_new_row(obj)
def is_updated(self):
""""""
return self.updated
class CandleChartDialog(QtWidgets.QDialog):
"""
"""
def __init__(self):
""""""
super().__init__()
self.dt_ix_map = {}
self.updated = False
self.init_ui()
def init_ui(self):
""""""
self.setWindowTitle("回测K线图表")
self.resize(1400, 800)
# Create chart widget
self.chart = ChartWidget()
self.chart.add_plot("candle", hide_x_axis=True)
self.chart.add_plot("volume", maximum_height=200)
self.chart.add_item(CandleItem, "candle", "candle")
self.chart.add_item(VolumeItem, "volume", "volume")
self.chart.add_cursor()
        # Add a scatter item for showing trades
self.trade_scatter = pg.ScatterPlotItem()
candle_plot = self.chart.get_plot("candle")
candle_plot.addItem(self.trade_scatter)
# Set layout
vbox = QtWidgets.QVBoxLayout()
vbox.addWidget(self.chart)
self.setLayout(vbox)
def update_history(self, history: list):
""""""
self.updated = True
self.chart.update_history(history)
for ix, bar in enumerate(history):
self.dt_ix_map[bar.datetime] = ix
def update_trades(self, trades: list):
""""""
trade_data = []
for trade in trades:
ix = self.dt_ix_map[trade.datetime]
scatter = {
"pos": (ix, trade.price),
"data": 1,
"size": 14,
"pen": pg.mkPen((255, 255, 255))
}
if trade.direction == Direction.LONG:
scatter_symbol = "t1" # Up arrow
else:
scatter_symbol = "t" # Down arrow
if trade.offset == Offset.OPEN:
scatter_brush = pg.mkBrush((255, 255, 0)) # Yellow
else:
scatter_brush = pg.mkBrush((0, 0, 255)) # Blue
scatter["symbol"] = scatter_symbol
scatter["brush"] = scatter_brush
trade_data.append(scatter)
self.trade_scatter.setData(trade_data)
def clear_data(self):
""""""
self.updated = False
self.chart.clear_all()
self.dt_ix_map.clear()
self.trade_scatter.clear()
def is_updated(self):
""""""
return self.updated
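
# Launch sketch (hedged: BacktesterManager needs vn.py's MainEngine/EventEngine
# and a running Qt application; the CtaBacktesterApp import path is an
# assumption about the surrounding vn.py package, not something this module
# defines):
#
#   from vnpy.event import EventEngine
#   from vnpy.trader.engine import MainEngine
#   from vnpy.app.cta_backtester import CtaBacktesterApp
#
#   event_engine = EventEngine()
#   main_engine = MainEngine(event_engine)
#   main_engine.add_app(CtaBacktesterApp)          # registers the APP_NAME engine
#   manager = BacktesterManager(main_engine, event_engine)
#   manager.show()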
#
# @lc app=leetcode id=677 lang=python3
#
# [677] Map Sum Pairs
# https://leetcode.com/problems/map-sum-pairs/
# This problem is about the trie data structure. Each node stores the sum of the
# values of every key that passes through it (i.e. every key sharing that prefix).
# Inserting a key that already exists overrides its previous value.
#
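#
# Worked example of the prefix sums (matches the tests below):
#   insert("apple", 3) -> nodes a, p, p, l, e each add 3, so sum("ap") == 3
#   insert("app", 2)   -> nodes a, p, p each add 2, so sum("ap") == 5
#   insert("apple", 8) -> val_diff = 8 - 3 = 5 is added along a-p-p-l-e,
#                         so sum("ap") becomes 2 + 8 == 10
#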
import unittest
from typing import Dict
# @lc code=start
class Node:
def __init__(self, val: int = 0):
self.value = val
self.children: Dict[str, Node] = {}
class MapSum:
def __init__(self) -> None:
"""
Initialize your data structure here.
"""
self.root_node = Node()
self.keys: Dict[str, int] = {}
def insert(self, key: str, val: int) -> None:
# override if key already exists
val_diff = val - self.keys.get(key, 0)
self.keys[key] = val
# track count of prefix characters
node = self.root_node
for c in key:
if c not in node.children:
node.children[c] = Node()
node = node.children[c]
node.value += val_diff
def sum(self, prefix: str) -> int:
node = self.root_node
for c in prefix:
# return 0 if prefix doesn't exist
if c not in node.children:
return 0
node = node.children[c]
return node.value
# Your MapSum object will be instantiated and called as such:
# obj = MapSum()
# obj.insert(key,val)
# param_2 = obj.sum(prefix)
# @lc code=end
class TestSolution(unittest.TestCase):
def test_given(self) -> None:
x = MapSum()
x.insert("apple", 3)
self.assertEqual(x.sum("ap"), 3)
x.insert("app", 2)
self.assertEqual(x.sum("ap"), 5)
def test_override(self) -> None:
x = MapSum()
x.insert("apple", 3)
x.insert("app", 2)
x.insert("apple", 8)
self.assertEqual(x.sum("ap"), 10)
if __name__ == "__main__":
unittest.main()
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union
from ... import _utilities, _tables
__all__ = [
'GetSecurityContactResult',
'AwaitableGetSecurityContactResult',
'get_security_contact',
]
@pulumi.output_type
class GetSecurityContactResult:
"""
Contact details for security issues
"""
def __init__(__self__, alert_notifications=None, alerts_to_admins=None, email=None, id=None, name=None, phone=None, type=None):
if alert_notifications and not isinstance(alert_notifications, str):
raise TypeError("Expected argument 'alert_notifications' to be a str")
pulumi.set(__self__, "alert_notifications", alert_notifications)
if alerts_to_admins and not isinstance(alerts_to_admins, str):
raise TypeError("Expected argument 'alerts_to_admins' to be a str")
pulumi.set(__self__, "alerts_to_admins", alerts_to_admins)
if email and not isinstance(email, str):
raise TypeError("Expected argument 'email' to be a str")
pulumi.set(__self__, "email", email)
if id and not isinstance(id, str):
raise TypeError("Expected argument 'id' to be a str")
pulumi.set(__self__, "id", id)
if name and not isinstance(name, str):
raise TypeError("Expected argument 'name' to be a str")
pulumi.set(__self__, "name", name)
if phone and not isinstance(phone, str):
raise TypeError("Expected argument 'phone' to be a str")
pulumi.set(__self__, "phone", phone)
if type and not isinstance(type, str):
raise TypeError("Expected argument 'type' to be a str")
pulumi.set(__self__, "type", type)
@property
@pulumi.getter(name="alertNotifications")
def alert_notifications(self) -> str:
"""
Whether to send security alerts notifications to the security contact
"""
return pulumi.get(self, "alert_notifications")
@property
@pulumi.getter(name="alertsToAdmins")
def alerts_to_admins(self) -> str:
"""
Whether to send security alerts notifications to subscription admins
"""
return pulumi.get(self, "alerts_to_admins")
@property
@pulumi.getter
def email(self) -> str:
"""
The email of this security contact
"""
return pulumi.get(self, "email")
@property
@pulumi.getter
def id(self) -> str:
"""
Resource Id
"""
return pulumi.get(self, "id")
@property
@pulumi.getter
def name(self) -> str:
"""
Resource name
"""
return pulumi.get(self, "name")
@property
@pulumi.getter
def phone(self) -> Optional[str]:
"""
The phone number of this security contact
"""
return pulumi.get(self, "phone")
@property
@pulumi.getter
def type(self) -> str:
"""
Resource type
"""
return pulumi.get(self, "type")
class AwaitableGetSecurityContactResult(GetSecurityContactResult):
# pylint: disable=using-constant-test
def __await__(self):
if False:
yield self
return GetSecurityContactResult(
alert_notifications=self.alert_notifications,
alerts_to_admins=self.alerts_to_admins,
email=self.email,
id=self.id,
name=self.name,
phone=self.phone,
type=self.type)
def get_security_contact(security_contact_name: Optional[str] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetSecurityContactResult:
"""
Contact details for security issues
:param str security_contact_name: Name of the security contact object
"""
__args__ = dict()
__args__['securityContactName'] = security_contact_name
if opts is None:
opts = pulumi.InvokeOptions()
if opts.version is None:
opts.version = _utilities.get_version()
__ret__ = pulumi.runtime.invoke('azure-nextgen:security/v20170801preview:getSecurityContact', __args__, opts=opts, typ=GetSecurityContactResult).value
return AwaitableGetSecurityContactResult(
alert_notifications=__ret__.alert_notifications,
alerts_to_admins=__ret__.alerts_to_admins,
email=__ret__.email,
id=__ret__.id,
name=__ret__.name,
phone=__ret__.phone,
type=__ret__.type)
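
# Usage sketch inside a Pulumi program (hedged: requires a configured
# azure-nextgen provider and an existing security contact; the contact name is
# illustrative):
#
#   contact = get_security_contact(security_contact_name="default1")
#   pulumi.export("securityContactEmail", contact.email)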
__author__ = 'Niklas Rosenstein <rosensteinniklas@gmail.com>'
__version__ = '1.7.6'
import copy
import glob
import os
import pkgutil
import sys
import traceback
import typing as t
import zipfile
if t.TYPE_CHECKING:
from sys import _MetaPathFinder
def is_local(filename: str, pathlist: t.List[str]) -> bool:
''' Returns True if *filename* is a subpath of any of the paths in *pathlist*. '''
filename = os.path.abspath(filename)
for path_name in pathlist:
path_name = os.path.abspath(path_name)
if is_subpath(filename, path_name):
return True
return False
def is_subpath(path: str, parent: str) -> bool:
''' Returns True if *path* points to the same or a subpath of *parent*. '''
try:
relpath = os.path.relpath(path, parent)
except ValueError:
return False # happens on Windows if drive letters don't match
return relpath == os.curdir or not relpath.startswith(os.pardir)
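
# For illustration (hypothetical paths, not from this module):
#   is_subpath('/opt/app/lib/mod.py', '/opt/app')          -> True
#   is_subpath('/usr/lib/python3.10/os.py', '/opt/app')    -> False
#   is_local('/opt/app/lib/mod.py', ['/opt/app', '/tmp'])  -> True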
def eval_pth(
filename: str,
sitedir: str,
dest: t.Optional[t.List[str]] = None,
imports: t.Optional[t.List[t.Tuple[str, int, str]]] = None,
) -> t.List[str]:
''' Evaluates a `.pth` file (including support for `import` statements), and appends the result to the list
*dest*. If *dest* is #None, it will fall back to `sys.path`.
  If *imports* is specified, it must be a list. `import` statements will not be executed but instead appended to
that list in tuples of (*filename*, *line*, *stmt*).
'''
if dest is None:
dest = sys.path
if not os.path.isfile(filename):
return []
with open(filename, 'r') as fp:
for index, line in enumerate(fp):
if line.startswith('import'):
if imports is None:
exec_pth_import(filename, index+1, line)
else:
imports.append((filename, index+1, line))
else:
index = line.find('#')
if index > 0: line = line[:index]
line = line.strip()
if not os.path.isabs(line):
line = os.path.join(os.path.dirname(filename), line)
line = os.path.normpath(line)
if line and line not in dest:
dest.insert(0, line)
return dest
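
# Usage sketch (hypothetical paths; eval_pth and exec_pth_import are the
# functions defined in this module):
#   paths, deferred = [], []
#   eval_pth('/opt/app/vendor/easy-install.pth', '/opt/app/vendor',
#            dest=paths, imports=deferred)
#   # `paths` now holds the directories listed in the .pth file, while
#   # `deferred` holds (filename, lineno, stmt) tuples for its import lines,
#   # which can be replayed later with exec_pth_import(*deferred[0]).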
def exec_pth_import(filename: str, lineno: int, line: str) -> None:
line = '\n' * (lineno - 1) + line.strip()
try:
exec(compile(line, filename, 'exec'))
except BaseException:
traceback.print_exc()
def extend_path(pth: t.List[str], name: str) -> t.List[str]:
''' Better implementation of #pkgutil.extend_path() which adds support for zipped Python eggs. The original
#pkgutil.extend_path() gets mocked by this function inside the #localimport context.
'''
  def zip_isfile(z, name):
    name = name.rstrip('/')
    return name in z.namelist()
pname = os.path.join(*name.split('.'))
zname = '/'.join(name.split('.'))
init_py = '__init__' + os.extsep + 'py'
init_pyc = '__init__' + os.extsep + 'pyc'
init_pyo = '__init__' + os.extsep + 'pyo'
mod_path = list(pth)
for path in sys.path:
if zipfile.is_zipfile(path):
try:
egg = zipfile.ZipFile(path, 'r')
addpath = (
zip_isfile(egg, zname + '/__init__.py') or
zip_isfile(egg, zname + '/__init__.pyc') or
zip_isfile(egg, zname + '/__init__.pyo'))
fpath = os.path.join(path, path, zname)
if addpath and fpath not in mod_path:
mod_path.append(fpath)
except (zipfile.BadZipfile, zipfile.LargeZipFile):
pass # xxx: Show a warning at least?
else:
path = os.path.join(path, pname)
if os.path.isdir(path) and path not in mod_path:
addpath = (
os.path.isfile(os.path.join(path, init_py)) or
os.path.isfile(os.path.join(path, init_pyc)) or
os.path.isfile(os.path.join(path, init_pyo)))
if addpath and path not in mod_path:
mod_path.append(path)
return [os.path.normpath(x) for x in mod_path]
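# Behavioural note (sketch, hypothetical layout): if sys.path contains 'vendor/foo.egg'
# whose archive ships foo/__init__.py, the loop above also records a path pointing into
# that egg for package 'foo', which the stock pkgutil.extend_path() would not discover.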
class localimport:
def __init__(
self,
path: t.Union[t.List[str], str],
parent_dir: t.Optional[str] = None,
do_eggs: bool = True,
do_pth: bool = True,
do_autodisable: bool = True,
) -> None:
if not parent_dir:
frame = sys._getframe(1).f_globals
if '__file__' in frame:
parent_dir = os.path.dirname(os.path.abspath(frame['__file__']))
# Convert relative paths to absolute paths with parent_dir and
# evaluate .egg files in the specified directories.
self.path = []
if isinstance(path, str):
path = [path]
for path_name in path:
if not os.path.isabs(path_name):
if not parent_dir:
raise ValueError('relative path but no parent_dir')
path_name = os.path.join(parent_dir, path_name)
path_name = os.path.normpath(path_name)
self.path.append(path_name)
if do_eggs:
self.path.extend(glob.glob(os.path.join(path_name, '*.egg')))
self.meta_path: t.List[_MetaPathFinder] = []
self.modules: t.Dict[str, t.Any] = {}
self.do_pth = do_pth
self.in_context = False
self.do_autodisable = do_autodisable
self.pth_imports: t.List[t.Tuple[str, int, str]] = []
if self.do_pth:
seen = set()
for path_name in self.path:
for fn in glob.glob(os.path.join(path_name, '*.pth')):
if fn in seen: continue
seen.add(fn)
eval_pth(fn, path_name, dest=self.path, imports=self.pth_imports)
def __enter__(self) -> 'localimport':
# pkg_resources comes with setuptools.
try:
import pkg_resources
nsdict = copy.deepcopy(pkg_resources._namespace_packages) # type: ignore
declare_namespace = pkg_resources.declare_namespace
pkg_resources.declare_namespace = self._declare_namespace # type: ignore
except ImportError:
nsdict = None
declare_namespace = None
# Save the global importer state.
self.state = {
'nsdict': nsdict,
'declare_namespace': declare_namespace,
'nspaths': {},
'path': sys.path[:],
'meta_path': sys.meta_path[:],
'disables': {},
'pkgutil.extend_path': pkgutil.extend_path,
}
# Update the systems meta path and apply function mocks.
sys.path[:] = self.path
sys.meta_path[:] = self.meta_path + sys.meta_path
pkgutil.extend_path = extend_path # type: ignore
# If this function is called not the first time, we need to
# restore the modules that have been imported with it and
# temporarily disable the ones that would be shadowed.
for key, mod in list(self.modules.items()):
try: self.state['disables'][key] = sys.modules.pop(key)
except KeyError: pass
sys.modules[key] = mod
# Evaluate imports from the .pth files, if any.
for fn, lineno, stmt in self.pth_imports:
exec_pth_import(fn, lineno, stmt)
# Add the original path to sys.path.
sys.path += self.state['path']
# Update the __path__ of all namespace modules.
for key, mod in list(sys.modules.items()):
if mod is None:
# Relative imports could have lead to None-entries in
# sys.modules. Get rid of them so they can be re-evaluated.
prefix = key.rpartition('.')[0]
if hasattr(sys.modules.get(prefix), '__path__'):
del sys.modules[key]
elif hasattr(mod, '__path__'):
self.state['nspaths'][key] = copy.copy(mod.__path__)
mod.__path__ = pkgutil.extend_path(mod.__path__, mod.__name__)
self.in_context = True
if self.do_autodisable:
self.autodisable()
return self
def __exit__(self, *__) -> None:
if not self.in_context:
raise RuntimeError('context not entered')
# Figure the difference of the original sys.path and the
# current path. The list of paths will be used to determine
# what modules are local and what not.
local_paths = []
for path in sys.path:
if path not in self.state['path']:
local_paths.append(path)
for path in self.path:
if path not in local_paths:
local_paths.append(path)
# Move all meta path objects to self.meta_path that have not
# been there before and have not been in the list before.
for meta in sys.meta_path:
if meta is not self and meta not in self.state['meta_path']:
if meta not in self.meta_path:
self.meta_path.append(meta)
# Move all modules that shadow modules of the original system
# state or modules that are from any of the localimport context
# paths away.
modules = sys.modules.copy()
for key, mod in modules.items():
force_pop = False
filename = getattr(mod, '__file__', None)
if not filename and key not in sys.builtin_module_names:
parent = key.rsplit('.', 1)[0]
if parent in modules:
filename = getattr(modules[parent], '__file__', None)
else:
force_pop = True
if force_pop or (filename and is_local(filename, local_paths)):
self.modules[key] = sys.modules.pop(key)
# Restore the disabled modules.
sys.modules.update(self.state['disables'])
for key, mod in self.state['disables'].items():
try: parent_name = key.split('.')[-2]
except IndexError: parent_name = None
if parent_name and parent_name in sys.modules:
parent_module = sys.modules[parent_name]
setattr(parent_module, key.split('.')[-1], mod)
# Restore the original __path__ value of namespace packages.
for key, path_list in self.state['nspaths'].items():
try: sys.modules[key].__path__ = path_list
except KeyError: pass
# Restore the original state of the global importer.
sys.path[:] = self.state['path']
sys.meta_path[:] = self.state['meta_path']
pkgutil.extend_path = self.state['pkgutil.extend_path']
try:
import pkg_resources
pkg_resources.declare_namespace = self.state['declare_namespace']
pkg_resources._namespace_packages.clear() # type: ignore
pkg_resources._namespace_packages.update(self.state['nsdict']) # type: ignore
except ImportError: pass
self.in_context = False
del self.state
def _declare_namespace(self, package_name: str) -> None:
'''
Mock for #pkg_resources.declare_namespace() which calls
#pkgutil.extend_path() afterwards as the original implementation doesn't
seem to properly find all available namespace paths.
'''
self.state['declare_namespace'](package_name)
mod = sys.modules[package_name]
mod.__path__ = pkgutil.extend_path(mod.__path__, package_name) # type: ignore
def discover(self) -> t.Iterable[pkgutil.ModuleInfo]:
return pkgutil.iter_modules(self.path)
def disable(self, module: t.Union[t.List[str], str]) -> None:
if not isinstance(module, str):
for module_name in module:
self.disable(module_name)
return
sub_prefix = module + '.'
modules = {}
for key, mod in sys.modules.items():
if key == module or key.startswith(sub_prefix):
try: parent_name = '.'.join(key.split('.')[:-1])
except IndexError: parent_name = None
# Delete the child module reference from the parent module.
modules[key] = mod
if parent_name and parent_name in sys.modules:
parent = sys.modules[parent_name]
try:
delattr(parent, key.split('.')[-1])
except AttributeError:
pass
# Pop all the modules we found from sys.modules
for key, mod in modules.items():
del sys.modules[key]
self.state['disables'][key] = mod
def autodisable(self) -> None:
for loader, name, ispkg in self.discover():
self.disable(name)
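def _example_isolated_import() -> None:
    # Hedged usage sketch; 'res/modules' and 'some_vendored_package' are placeholders.
    # Packages imported inside the with-block come from the isolated path and are
    # removed from sys.modules again when the context exits.
    with localimport('res/modules') as _importer:
        import some_vendored_package  # noqa: F401 -- hypothetical vendored package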
| [
[
[
1,
11
]
],
[
[
63,
74
]
],
[
[
93,
97
],
[
5455,
5459
],
[
7245,
7249
]
],
[
[
105,
109
],
[
4761,
4765
],
[
5142,
5146
]
],
[
[
117,
119
],
[
413,
415
],
[
484,
486
],
[
734,
736
],
[
873,
875
],
[
909,
911
],
[
1484,
1486
],
[
1906,
1908
],
[
1944,
1946
],
[
1957,
1959
],
[
2005,
2007
],
[
2684,
2686
],
[
2776,
2778
],
[
2819,
2821
],
[
2863,
2865
],
[
3209,
3211
],
[
3446,
3448
],
[
3481,
3483
],
[
3557,
3559
],
[
3572,
3574
],
[
3614,
3616
],
[
3629,
3631
],
[
3672,
3674
],
[
3687,
3689
],
[
3806,
3808
],
[
4182,
4184
],
[
4198,
4200
],
[
4467,
4469
],
[
4602,
4604
],
[
4656,
4658
],
[
4771,
4773
],
[
5152,
5154
]
],
[
[
127,
134
],
[
5997,
6004
],
[
6172,
6179
],
[
7292,
7299
],
[
9615,
9622
],
[
10381,
10388
],
[
10481,
10488
],
[
10513,
10520
]
],
[
[
142,
145
],
[
1465,
1468
],
[
2919,
2922
],
[
4104,
4107
],
[
5896,
5899
],
[
5928,
5931
],
[
6090,
6093
],
[
6154,
6157
],
[
6118,
6121
],
[
6497,
6500
],
[
6552,
6555
],
[
6760,
6763
],
[
6869,
6872
],
[
7102,
7105
],
[
7154,
7157
],
[
7755,
7758
],
[
8077,
8080
],
[
8403,
8406
],
[
8568,
8571
],
[
8866,
8869
],
[
8928,
8931
],
[
9151,
9154
],
[
9188,
9191
],
[
9403,
9406
],
[
9531,
9534
],
[
9568,
9571
],
[
10336,
10339
],
[
10794,
10797
],
[
11110,
11113
],
[
11142,
11145
],
[
11380,
11383
]
],
[
[
153,
162
],
[
2303,
2312
]
],
[
[
170,
181
],
[
201,
202
],
[
292,
293
],
[
1076,
1077
],
[
977,
978
],
[
988,
989
],
[
1020,
1021
],
[
1031,
1032
],
[
1038,
1039
],
[
2375,
2376
],
[
2348,
2349
],
[
3903,
3904
],
[
3911,
3912
],
[
3946,
3947
],
[
4827,
4828
],
[
4874,
4875
],
[
4886,
4887
],
[
5014,
5015
],
[
5021,
5022
],
[
10470,
10471
],
[
10574,
10575
],
[
10582,
10583
]
],
[
[
189,
196
],
[
2936,
2943
],
[
2987,
2994
],
[
3334,
3341
],
[
3354,
3361
]
],
[
[
236,
251
],
[
4834,
4849
]
],
[
[
258,
266
],
[
8804,
8812
]
],
[
[
590,
600
],
[
518,
528
]
],
[
[
926,
934
],
[
5254,
5262
]
],
[
[
2118,
2133
],
[
1671,
1686
],
[
6680,
6695
]
],
[
[
2331,
2342
],
[
6194,
6205
]
],
[
[
3853,
3864
]
]
] |
import numpy as np
import h5py
import pandas as pd
from svhn_io import load_svhn
from keras_uncertainty.utils import classifier_calibration_curve, classifier_calibration_error
EPSILON = 1e-10
def load_hdf5_data(filename):
inp = h5py.File(filename, "r")
preds = inp["preds"][...]
inp.close()
return preds
NUM_ENSEMBLES = 15
NUM_BINS=7
#IOD_FILE_PATTERN = "cnn_svhn-num_ens-{}-preds.hdf5"
#OUTPUT_PATTERN = "svhn-calibration-sub-deepensembles_1_num-ens-{}_cnn_svhn.csv"
IOD_FILE_PATTERN = "deepensembles-cnn_svhn-num_ens-{}-preds.hdf5"
OUTPUT_PATTERN = "svhn-calibration-deepensembles-num-ens-{}_cnn_svhn.csv"
if __name__ == "__main__":
for num_ens in range(1, NUM_ENSEMBLES + 1):
(_, __), (___, y_true) = load_svhn()
y_true = y_true.flatten()
y_probs = load_hdf5_data(IOD_FILE_PATTERN.format(num_ens))
y_confs = np.max(y_probs, axis=1)
y_pred = np.argmax(y_probs, axis=1)
curve_conf, curve_acc = classifier_calibration_curve(y_pred, y_true, y_confs, num_bins=NUM_BINS)
error = classifier_calibration_error(y_pred, y_true, y_confs, num_bins=NUM_BINS)
print("Processing calibration curve for {} ensembles. Error: {}".format(num_ens, error))
output_df = pd.DataFrame(data={"conf": curve_conf, "acc": curve_acc})
output_df.to_csv(OUTPUT_PATTERN.format(num_ens), sep=';', index=False) | [
[
[
7,
18
],
[
873,
875
],
[
914,
916
]
],
[
[
26,
30
],
[
235,
239
]
],
[
[
38,
50
],
[
1255,
1257
]
],
[
[
72,
81
],
[
741,
750
]
],
[
[
118,
146
],
[
974,
1002
]
],
[
[
148,
176
],
[
1063,
1091
]
],
[
[
178,
185
]
],
[
[
199,
213
],
[
806,
820
]
],
[
[
326,
339
],
[
688,
701
]
],
[
[
345,
353
],
[
1037,
1045
],
[
1126,
1134
]
],
[
[
492,
508
],
[
821,
837
]
],
[
[
558,
572
],
[
1338,
1352
]
],
[
[
668,
675
],
[
845,
852
],
[
1217,
1224
],
[
1360,
1367
]
],
[
[
717,
718
]
],
[
[
720,
722
]
],
[
[
726,
729
]
],
[
[
731,
737
],
[
770,
776
]
],
[
[
761,
767
],
[
1011,
1017
],
[
1100,
1106
]
],
[
[
796,
803
],
[
880,
887
],
[
924,
931
]
],
[
[
863,
870
],
[
1019,
1026
],
[
1108,
1115
]
],
[
[
905,
911
],
[
1003,
1009
],
[
1092,
1098
]
],
[
[
950,
960
],
[
1282,
1292
]
],
[
[
962,
971
],
[
1301,
1310
]
],
[
[
1055,
1060
],
[
1226,
1231
]
],
[
[
1243,
1252
],
[
1321,
1330
]
]
] |
from unittest import TestCase
from rockaway.models import Track
class TestTrackBasics(TestCase):
def test_track_create_no_args(self):
track = Track()
self.assertFalse(track.hasDbEntry())
self.assertFalse(track.hasFile())
def test_track_create(self):
args = {"Title": "Rockaway Beach",
"Artist": "The Ramones", # FIXME--This and album will not just be strings
"Album": "Rocket to Russia",
"Year": 1977,
"Genre": "Punk Rock",
"Time": 126000}
track = Track(**args)
self.assertEqual(track.Title, args["Title"])
self.assertEqual(track.Year, 1977)
# Alternate ways of looking up attributes
self.assertEqual(track.genre, track.Genre)
self.assertEqual(track.Time, track["Time"])
| [
[
[
21,
29
],
[
88,
96
]
],
[
[
59,
64
],
[
158,
163
],
[
583,
588
]
],
[
[
72,
87
]
]
] |
"""
Implementation of functions in the Numpy package.
"""
import math
import sys
import itertools
from collections import namedtuple
from llvmlite.llvmpy import core as lc
import numpy as np
import operator
from . import builtins, callconv, ufunc_db, arrayobj
from .imputils import Registry, impl_ret_new_ref, force_error_model
from .. import typing, types, cgutils, numpy_support, utils
from ..numpy_support import ufunc_find_matching_loop, select_array_wrapper, from_dtype
from ..typing import npydecl
from ..extending import overload, intrinsic
from .. import errors
registry = Registry()
lower = registry.lower
########################################################################
# In the way we generate code, ufuncs work with scalar as well as
# with array arguments. The following helper classes help dealing
# with scalar and array arguments in a regular way.
#
# In short, the classes provide a uniform interface. The interface
# handles the indexing of as many dimensions as the array may have.
# For scalars, all indexing is ignored and when the value is read,
# the scalar is returned. For arrays code for actual indexing is
# generated and reading performs the appropriate indirection.
class _ScalarIndexingHelper(object):
def update_indices(self, loop_indices, name):
pass
def as_values(self):
pass
class _ScalarHelper(object):
"""Helper class to handle scalar arguments (and result).
Note that store_data is only used when generating code for
a scalar ufunc and to write the output value.
For loading, the value is directly used without having any
kind of indexing nor memory backing it up. This is the use
for input arguments.
For storing, a variable is created in the stack where the
value will be written.
Note that it is not supported (as it is unneeded for our
current use-cases) reading back a stored value. This class
will always "load" the original value it got at its creation.
"""
def __init__(self, ctxt, bld, val, ty):
self.context = ctxt
self.builder = bld
self.val = val
self.base_type = ty
intpty = ctxt.get_value_type(types.intp)
self.shape = [lc.Constant.int(intpty, 1)]
lty = ctxt.get_data_type(ty) if ty != types.boolean else lc.Type.int(1)
self._ptr = cgutils.alloca_once(bld, lty)
def create_iter_indices(self):
return _ScalarIndexingHelper()
def load_data(self, indices):
return self.val
def store_data(self, indices, val):
self.builder.store(val, self._ptr)
@property
def return_val(self):
return self.builder.load(self._ptr)
class _ArrayIndexingHelper(namedtuple('_ArrayIndexingHelper',
('array', 'indices'))):
def update_indices(self, loop_indices, name):
bld = self.array.builder
intpty = self.array.context.get_value_type(types.intp)
ONE = lc.Constant.int(lc.Type.int(intpty.width), 1)
# we are only interested in as many inner dimensions as dimensions
# the indexed array has (the outer dimensions are broadcast, so
# ignoring the outer indices produces the desired result.
indices = loop_indices[len(loop_indices) - len(self.indices):]
for src, dst, dim in zip(indices, self.indices, self.array.shape):
cond = bld.icmp(lc.ICMP_UGT, dim, ONE)
with bld.if_then(cond):
bld.store(src, dst)
def as_values(self):
"""
The indexing helper is built using alloca for each value, so it
actually contains pointers to the actual indices to load. Note
that update_indices assumes the same. This method returns the
indices as values
"""
bld = self.array.builder
return [bld.load(index) for index in self.indices]
class _ArrayHelper(namedtuple('_ArrayHelper', ('context', 'builder',
'shape', 'strides', 'data',
'layout', 'base_type', 'ndim',
'return_val'))):
"""Helper class to handle array arguments/result.
It provides methods to generate code loading/storing specific
items as well as support code for handling indices.
"""
def create_iter_indices(self):
intpty = self.context.get_value_type(types.intp)
ZERO = lc.Constant.int(lc.Type.int(intpty.width), 0)
indices = []
for i in range(self.ndim):
x = cgutils.alloca_once(self.builder, lc.Type.int(intpty.width))
self.builder.store(ZERO, x)
indices.append(x)
return _ArrayIndexingHelper(self, indices)
def _load_effective_address(self, indices):
return cgutils.get_item_pointer2(self.context,
self.builder,
data=self.data,
shape=self.shape,
strides=self.strides,
layout=self.layout,
inds=indices)
def load_data(self, indices):
model = self.context.data_model_manager[self.base_type]
ptr = self._load_effective_address(indices)
return model.load_from_data_pointer(self.builder, ptr)
def store_data(self, indices, value):
ctx = self.context
bld = self.builder
store_value = ctx.get_value_as_data(bld, self.base_type, value)
assert ctx.get_data_type(self.base_type) == store_value.type
bld.store(store_value, self._load_effective_address(indices))
def _prepare_argument(ctxt, bld, inp, tyinp, where='input operand'):
"""returns an instance of the appropriate Helper (either
_ScalarHelper or _ArrayHelper) class to handle the argument.
using the polymorphic interface of the Helper classes, scalar
and array cases can be handled with the same code"""
# first un-Optional Optionals
if isinstance(tyinp, types.Optional):
oty = tyinp
tyinp = tyinp.type
inp = ctxt.cast(bld, inp, oty, tyinp)
# then prepare the arg for a concrete instance
if isinstance(tyinp, types.ArrayCompatible):
ary = ctxt.make_array(tyinp)(ctxt, bld, inp)
shape = cgutils.unpack_tuple(bld, ary.shape, tyinp.ndim)
strides = cgutils.unpack_tuple(bld, ary.strides, tyinp.ndim)
return _ArrayHelper(ctxt, bld, shape, strides, ary.data,
tyinp.layout, tyinp.dtype, tyinp.ndim, inp)
elif types.unliteral(tyinp) in types.number_domain | set([types.boolean]):
return _ScalarHelper(ctxt, bld, inp, tyinp)
else:
raise NotImplementedError('unsupported type for {0}: {1}'.format(where, str(tyinp)))
_broadcast_onto_sig = types.intp(types.intp, types.CPointer(types.intp),
types.intp, types.CPointer(types.intp))
def _broadcast_onto(src_ndim, src_shape, dest_ndim, dest_shape):
'''Low-level utility function used in calculating a shape for
an implicit output array. This function assumes that the
destination shape is an LLVM pointer to a C-style array that was
already initialized to a size of one along all axes.
Returns an integer value:
>= 1 : Succeeded. Return value should equal the number of dimensions in
the destination shape.
0 : Failed to broadcast because source shape is larger than the
destination shape (this case should be weeded out at type
checking).
< 0 : Failed to broadcast onto destination axis, at axis number ==
-(return_value + 1).
'''
if src_ndim > dest_ndim:
# This check should have been done during type checking, but
# let's be defensive anyway...
return 0
else:
src_index = 0
dest_index = dest_ndim - src_ndim
while src_index < src_ndim:
src_dim_size = src_shape[src_index]
dest_dim_size = dest_shape[dest_index]
# Check to see if we've already mutated the destination
# shape along this axis.
if dest_dim_size != 1:
# If we have mutated the destination shape already,
# then the source axis size must either be one,
# or the destination axis size.
if src_dim_size != dest_dim_size and src_dim_size != 1:
return -(dest_index + 1)
elif src_dim_size != 1:
# If the destination size is still its initial value of one, adopt the source size
dest_shape[dest_index] = src_dim_size
src_index += 1
dest_index += 1
return dest_index
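def _example_broadcast_onto() -> None:
    # Worked example (illustration only): called here as plain Python rather than through
    # context.compile_internal. Broadcasting a (3, 1) source onto a rank-3 destination
    # initialized to all ones is right-aligned, so the destination becomes (1, 3, 1).
    dest_shape = [1, 1, 1]
    ndim = _broadcast_onto(2, [3, 1], 3, dest_shape)
    assert ndim == 3 and dest_shape == [1, 3, 1]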
def _build_array(context, builder, array_ty, input_types, inputs):
"""Utility function to handle allocation of an implicit output array
given the target context, builder, output array type, and a list of
_ArrayHelper instances.
"""
intp_ty = context.get_value_type(types.intp)
def make_intp_const(val):
return context.get_constant(types.intp, val)
ZERO = make_intp_const(0)
ONE = make_intp_const(1)
src_shape = cgutils.alloca_once(builder, intp_ty, array_ty.ndim,
"src_shape")
dest_ndim = make_intp_const(array_ty.ndim)
dest_shape = cgutils.alloca_once(builder, intp_ty, array_ty.ndim,
"dest_shape")
dest_shape_addrs = tuple(cgutils.gep_inbounds(builder, dest_shape, index)
for index in range(array_ty.ndim))
# Initialize the destination shape with all ones.
for dest_shape_addr in dest_shape_addrs:
builder.store(ONE, dest_shape_addr)
# For each argument, try to broadcast onto the destination shape,
# mutating along any axis where the argument shape is not one and
# the destination shape is one.
for arg_number, arg in enumerate(inputs):
if not hasattr(arg, "ndim"): # Skip scalar arguments
continue
arg_ndim = make_intp_const(arg.ndim)
for index in range(arg.ndim):
builder.store(arg.shape[index],
cgutils.gep_inbounds(builder, src_shape, index))
arg_result = context.compile_internal(
builder, _broadcast_onto, _broadcast_onto_sig,
[arg_ndim, src_shape, dest_ndim, dest_shape])
with cgutils.if_unlikely(builder,
builder.icmp(lc.ICMP_SLT, arg_result, ONE)):
msg = "unable to broadcast argument %d to output array" % (
arg_number,)
loc = errors.loc_info.get('loc', None)
if loc is not None:
msg += '\nFile "%s", line %d, ' % (loc.filename, loc.line)
context.call_conv.return_user_exc(builder, ValueError, (msg,))
real_array_ty = array_ty.as_array
dest_shape_tup = tuple(builder.load(dest_shape_addr)
for dest_shape_addr in dest_shape_addrs)
array_val = arrayobj._empty_nd_impl(context, builder, real_array_ty,
dest_shape_tup)
# Get the best argument to call __array_wrap__ on
array_wrapper_index = select_array_wrapper(input_types)
array_wrapper_ty = input_types[array_wrapper_index]
try:
# __array_wrap__(source wrapped array, out array) -> out wrapped array
array_wrap = context.get_function('__array_wrap__',
array_ty(array_wrapper_ty, real_array_ty))
except NotImplementedError:
# If it's the same priority as a regular array, assume we
# should use the allocated array unchanged.
if array_wrapper_ty.array_priority != types.Array.array_priority:
raise
out_val = array_val._getvalue()
else:
wrap_args = (inputs[array_wrapper_index].return_val, array_val._getvalue())
out_val = array_wrap(builder, wrap_args)
ndim = array_ty.ndim
shape = cgutils.unpack_tuple(builder, array_val.shape, ndim)
strides = cgutils.unpack_tuple(builder, array_val.strides, ndim)
return _ArrayHelper(context, builder, shape, strides, array_val.data,
array_ty.layout, array_ty.dtype, ndim,
out_val)
def numpy_ufunc_kernel(context, builder, sig, args, kernel_class,
explicit_output=True):
# This is the code generator that builds all the looping needed
# to execute a numpy functions over several dimensions (including
# scalar cases).
#
# context - the code generation context
# builder - the code emitter
# sig - signature of the ufunc
# args - the args to the ufunc
# kernel_class - a code generating subclass of _Kernel that provides
# explicit_output - if the output was explicit in the call
# (ie: np.add(x,y,r))
arguments = [_prepare_argument(context, builder, arg, tyarg)
for arg, tyarg in zip(args, sig.args)]
if not explicit_output:
ret_ty = sig.return_type
if isinstance(ret_ty, types.ArrayCompatible):
output = _build_array(context, builder, ret_ty, sig.args, arguments)
else:
output = _prepare_argument(
context, builder,
lc.Constant.null(context.get_value_type(ret_ty)), ret_ty)
arguments.append(output)
elif context.enable_nrt:
# Incref the output
context.nrt.incref(builder, sig.return_type, args[-1])
inputs = arguments[0:-1]
output = arguments[-1]
outer_sig = [a.base_type for a in arguments]
#signature expects return type first, while we have it last:
outer_sig = outer_sig[-1:] + outer_sig[:-1]
outer_sig = typing.signature(*outer_sig)
kernel = kernel_class(context, builder, outer_sig)
intpty = context.get_value_type(types.intp)
indices = [inp.create_iter_indices() for inp in inputs]
loopshape = output.shape
with cgutils.loop_nest(builder, loopshape, intp=intpty) as loop_indices:
vals_in = []
for i, (index, arg) in enumerate(zip(indices, inputs)):
index.update_indices(loop_indices, i)
vals_in.append(arg.load_data(index.as_values()))
val_out = kernel.generate(*vals_in)
output.store_data(loop_indices, val_out)
out = arguments[-1].return_val
return impl_ret_new_ref(context, builder, sig.return_type, out)
# Kernels are the code to be executed inside the multidimensional loop.
class _Kernel(object):
def __init__(self, context, builder, outer_sig):
self.context = context
self.builder = builder
self.outer_sig = outer_sig
def cast(self, val, fromty, toty):
"""Numpy uses cast semantics that are different from standard Python
(for example, it does allow casting from complex to float).
This method acts as a patch to context.cast so that it allows
complex to real/int casts.
"""
if (isinstance(fromty, types.Complex) and
not isinstance(toty, types.Complex)):
# attempt conversion of the real part to the specified type.
# note that NumPy issues a warning in this kind of conversion
newty = fromty.underlying_float
attr = self.context.get_getattr(fromty, 'real')
val = attr(self.context, self.builder, fromty, val, 'real')
fromty = newty
# let the regular cast do the rest...
return self.context.cast(self.builder, val, fromty, toty)
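# Reference NumPy behaviour being mimicked (illustration): np.array([3 + 4j]).astype(np.float64)
# returns array([3.0]) and only emits a ComplexWarning, whereas plain float(3 + 4j) raises a
# TypeError -- the cast() patch above reproduces the permissive NumPy semantics.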
def _ufunc_db_function(ufunc):
"""Use the ufunc loop type information to select the code generation
function from the table provided by the dict_of_kernels. The dict
of kernels maps the loop identifier to a function with the
following signature: (context, builder, signature, args).
The loop type information has the form 'AB->C'. The letters to the
left of '->' are the input types (specified as NumPy letter
types). The letters to the right of '->' are the output
types. There must be 'ufunc.nin' letters to the left of '->', and
'ufunc.nout' letters to the right.
For example, a binary float loop resulting in a float, will have
the following signature: 'ff->f'.
A given ufunc implements many loops. The list of loops implemented
for a given ufunc can be accessed using the 'types' attribute in
the ufunc object. The NumPy machinery selects the first loop that
fits a given calling signature (in our case, what we call the
outer_sig). This logic is mimicked by 'ufunc_find_matching_loop'.
"""
class _KernelImpl(_Kernel):
def __init__(self, context, builder, outer_sig):
super(_KernelImpl, self).__init__(context, builder, outer_sig)
loop = ufunc_find_matching_loop(
ufunc, outer_sig.args + (outer_sig.return_type,))
self.fn = ufunc_db.get_ufunc_info(ufunc).get(loop.ufunc_sig)
self.inner_sig = typing.signature(
*(loop.outputs + loop.inputs))
if self.fn is None:
msg = "Don't know how to lower ufunc '{0}' for loop '{1}'"
raise NotImplementedError(msg.format(ufunc.__name__, loop))
def generate(self, *args):
isig = self.inner_sig
osig = self.outer_sig
cast_args = [self.cast(val, inty, outty)
for val, inty, outty in zip(args, osig.args,
isig.args)]
with force_error_model(self.context, 'numpy'):
res = self.fn(self.context, self.builder, isig, cast_args)
dmm = self.context.data_model_manager
res = dmm[isig.return_type].from_return(self.builder, res)
return self.cast(res, isig.return_type, osig.return_type)
return _KernelImpl
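# Concrete loop signatures, for illustration: np.add.types contains entries such as
# 'ff->f' (two float32 inputs, one float32 output) and 'dd->d' (float64), and
# ufunc_find_matching_loop() picks the first entry compatible with the outer signature.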
################################################################################
# Helper functions that register the ufuncs
_kernels = {} # Temporary map from ufunc's to their kernel implementation class
def register_unary_ufunc_kernel(ufunc, kernel):
def unary_ufunc(context, builder, sig, args):
return numpy_ufunc_kernel(context, builder, sig, args, kernel)
def unary_ufunc_no_explicit_output(context, builder, sig, args):
return numpy_ufunc_kernel(context, builder, sig, args, kernel,
explicit_output=False)
_any = types.Any
# (array or scalar, out=array)
lower(ufunc, _any, types.Array)(unary_ufunc)
# (array or scalar)
lower(ufunc, _any)(unary_ufunc_no_explicit_output)
_kernels[ufunc] = kernel
def register_binary_ufunc_kernel(ufunc, kernel):
def binary_ufunc(context, builder, sig, args):
return numpy_ufunc_kernel(context, builder, sig, args, kernel)
def binary_ufunc_no_explicit_output(context, builder, sig, args):
return numpy_ufunc_kernel(context, builder, sig, args, kernel,
explicit_output=False)
_any = types.Any
# (array or scalar, array or scalar, out=array)
lower(ufunc, _any, _any, types.Array)(binary_ufunc)
# (scalar, scalar)
lower(ufunc, _any, _any)(binary_ufunc_no_explicit_output)
_kernels[ufunc] = kernel
def register_unary_operator_kernel(operator, kernel, inplace=False):
assert not inplace # are there any inplace unary operators?
def lower_unary_operator(context, builder, sig, args):
return numpy_ufunc_kernel(context, builder, sig, args, kernel,
explicit_output=False)
_arr_kind = types.Array
lower(operator, _arr_kind)(lower_unary_operator)
def register_binary_operator_kernel(op, kernel, inplace=False):
def lower_binary_operator(context, builder, sig, args):
return numpy_ufunc_kernel(context, builder, sig, args, kernel,
explicit_output=False)
def lower_inplace_operator(context, builder, sig, args):
# The visible signature is (A, B) -> A
# The implementation's signature (with explicit output)
# is (A, B, A) -> A
args = tuple(args) + (args[0],)
sig = typing.signature(sig.return_type, *sig.args + (sig.args[0],))
return numpy_ufunc_kernel(context, builder, sig, args, kernel,
explicit_output=True)
_any = types.Any
_arr_kind = types.Array
formal_sigs = [(_arr_kind, _arr_kind), (_any, _arr_kind), (_arr_kind, _any)]
for sig in formal_sigs:
if not inplace:
lower(op, *sig)(lower_binary_operator)
else:
lower(op, *sig)(lower_inplace_operator)
################################################################################
# Use the contents of ufunc_db to initialize the supported ufuncs
for ufunc in ufunc_db.get_ufuncs():
if ufunc.nin == 1:
register_unary_ufunc_kernel(ufunc, _ufunc_db_function(ufunc))
elif ufunc.nin == 2:
register_binary_ufunc_kernel(ufunc, _ufunc_db_function(ufunc))
else:
raise RuntimeError("Don't know how to register ufuncs from ufunc_db with arity > 2")
@lower(operator.pos, types.Array)
def array_positive_impl(context, builder, sig, args):
'''Lowering function for +(array) expressions. Defined here
(numba.targets.npyimpl) since the remaining array-operator
lowering functions are also registered in this module.
'''
class _UnaryPositiveKernel(_Kernel):
def generate(self, *args):
[val] = args
return val
return numpy_ufunc_kernel(context, builder, sig, args,
_UnaryPositiveKernel, explicit_output=False)
for _op_map in (npydecl.NumpyRulesUnaryArrayOperator._op_map,
npydecl.NumpyRulesArrayOperator._op_map,
):
for operator, ufunc_name in _op_map.items():
ufunc = getattr(np, ufunc_name)
kernel = _kernels[ufunc]
if ufunc.nin == 1:
register_unary_operator_kernel(operator, kernel)
elif ufunc.nin == 2:
register_binary_operator_kernel(operator, kernel)
else:
raise RuntimeError("There shouldn't be any non-unary or binary operators")
for _op_map in (npydecl.NumpyRulesInplaceArrayOperator._op_map,
):
for operator, ufunc_name in _op_map.items():
ufunc = getattr(np, ufunc_name)
kernel = _kernels[ufunc]
if ufunc.nin == 1:
register_unary_operator_kernel(operator, kernel, inplace=True)
elif ufunc.nin == 2:
register_binary_operator_kernel(operator, kernel, inplace=True)
else:
raise RuntimeError("There shouldn't be any non-unary or binary operators")
del _kernels
@intrinsic
def _make_dtype_object(typingctx, desc):
"""Given a string or NumberClass description *desc*, returns the dtype object.
"""
def from_nb_type(nb_type):
return_type = types.DType(nb_type)
sig = return_type(desc)
def codegen(context, builder, signature, args):
# All dtype objects are dummy values in LLVM.
# They only exist in the type level.
return context.get_dummy_value()
return sig, codegen
if isinstance(desc, types.Literal):
# Convert the str description into np.dtype then to numba type.
nb_type = from_dtype(np.dtype(desc.literal_value))
return from_nb_type(nb_type)
elif isinstance(desc, types.functions.NumberClass):
thestr = str(desc.dtype)
# Convert the str description into np.dtype then to numba type.
nb_type = from_dtype(np.dtype(thestr))
return from_nb_type(nb_type)
@overload(np.dtype)
def numpy_dtype(desc):
"""Provide an implementation so that numpy.dtype function can be lowered.
"""
if isinstance(desc, (types.Literal, types.functions.NumberClass)):
def imp(desc):
return _make_dtype_object(desc)
return imp
else:
raise TypeError('unknown dtype descriptor: {}'.format(desc))
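# Effect of the overload above, sketched: inside a compiled function, np.dtype('int32')
# (a string literal) or np.dtype(np.int32) (a NumberClass) both lower to a type-level
# dummy value, matching the codegen in _make_dtype_object().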
| [
[
[
67,
71
]
],
[
[
79,
82
]
],
[
[
90,
99
]
],
[
[
124,
134
],
[
2711,
2721
],
[
3899,
3909
]
],
[
[
164,
174
],
[
2220,
2222
],
[
2314,
2316
],
[
2968,
2970
],
[
2984,
2986
],
[
3402,
3404
],
[
4457,
4459
],
[
4473,
4475
],
[
4610,
4612
],
[
10589,
10591
],
[
13441,
13443
]
],
[
[
183,
194
],
[
22086,
22088
],
[
22572,
22574
],
[
23898,
23900
],
[
23575,
23577
],
[
23832,
23834
]
],
[
[
202,
210
],
[
21338,
21346
]
],
[
[
226,
234
]
],
[
[
236,
244
]
],
[
[
246,
254
],
[
21014,
21022
],
[
17068,
17076
]
],
[
[
256,
264
],
[
11138,
11146
]
],
[
[
287,
295
],
[
588,
596
]
],
[
[
297,
313
],
[
14524,
14540
]
],
[
[
315,
332
],
[
17707,
17724
]
],
[
[
348,
354
],
[
13888,
13894
],
[
17148,
17154
],
[
20361,
20367
]
],
[
[
356,
361
],
[
6913,
6918
],
[
6924,
6929
],
[
6936,
6941
],
[
6951,
6956
],
[
6997,
7002
],
[
7009,
7014
],
[
7024,
7029
],
[
21352,
21357
],
[
2186,
2191
],
[
2295,
2300
],
[
2942,
2947
],
[
4430,
4435
],
[
6114,
6119
],
[
6301,
6306
],
[
6664,
6669
],
[
6690,
6695
],
[
6717,
6722
],
[
9102,
9107
],
[
11851,
11856
],
[
13232,
13237
],
[
14008,
14013
],
[
15163,
15168
],
[
15215,
15220
],
[
18627,
18632
],
[
18696,
18701
],
[
19215,
19220
],
[
19306,
19311
],
[
19787,
19792
],
[
20562,
20567
],
[
20588,
20593
],
[
23458,
23463
],
[
23668,
23673
],
[
24042,
24047
],
[
24057,
24062
],
[
9180,
9185
],
[
23142,
23147
]
],
[
[
363,
370
],
[
2349,
2356
],
[
4576,
4583
],
[
4822,
4829
],
[
6400,
6407
],
[
6467,
6474
],
[
9274,
9281
],
[
9440,
9447
],
[
9573,
9580
],
[
10288,
10295
],
[
10514,
10521
],
[
12120,
12127
],
[
12187,
12194
],
[
14120,
14127
]
],
[
[
372,
385
]
],
[
[
387,
392
]
],
[
[
421,
445
],
[
16954,
16978
]
],
[
[
447,
467
],
[
11332,
11352
]
],
[
[
469,
479
],
[
23564,
23574
],
[
23821,
23831
]
],
[
[
501,
508
],
[
21891,
21898
],
[
21953,
21960
],
[
22432,
22439
]
],
[
[
533,
541
],
[
23889,
23897
]
],
[
[
543,
552
],
[
22947,
22956
]
],
[
[
569,
575
],
[
10741,
10747
]
],
[
[
577,
585
],
[
607,
615
]
],
[
[
599,
604
],
[
21332,
21337
],
[
18677,
18682
],
[
18750,
18755
],
[
19281,
19286
],
[
19360,
19365
],
[
19803,
19808
],
[
20745,
20750
],
[
20810,
20815
]
],
[
[
1220,
1241
],
[
2430,
2451
]
],
[
[
1361,
1374
],
[
6749,
6762
]
],
[
[
2690,
2710
],
[
4722,
4742
]
],
[
[
3886,
3898
],
[
6533,
6545
],
[
12253,
12265
]
],
[
[
5740,
5757
],
[
13037,
13054
],
[
13372,
13389
]
],
[
[
6891,
6910
],
[
10422,
10441
]
],
[
[
7041,
7056
],
[
10405,
10420
]
],
[
[
8821,
8833
],
[
13277,
13289
]
],
[
[
12418,
12436
],
[
21750,
21768
],
[
18361,
18379
],
[
18502,
18520
],
[
18948,
18966
],
[
19090,
19108
],
[
19658,
19676
],
[
19993,
20011
],
[
20438,
20456
]
],
[
[
14661,
14668
],
[
16793,
16800
],
[
21645,
21652
]
],
[
[
15706,
15724
],
[
21103,
21121
],
[
21199,
21217
]
],
[
[
18167,
18175
],
[
22119,
22127
],
[
22605,
22613
],
[
22936,
22944
],
[
18806,
18814
],
[
19423,
19431
]
],
[
[
18252,
18279
],
[
21068,
21095
]
],
[
[
18837,
18865
],
[
21163,
21191
]
],
[
[
19454,
19484
],
[
22174,
22204
],
[
22660,
22690
]
],
[
[
19858,
19889
],
[
22264,
22295
],
[
22764,
22795
]
],
[
[
21005,
21010
],
[
21044,
21049
],
[
21096,
21101
],
[
21122,
21127
],
[
21139,
21144
],
[
21192,
21197
],
[
21218,
21223
]
],
[
[
21369,
21388
]
],
[
[
21879,
21886
],
[
22045,
22052
]
],
[
[
22021,
22029
],
[
22205,
22213
],
[
22296,
22304
]
],
[
[
22031,
22041
],
[
22090,
22100
]
],
[
[
22070,
22075
],
[
22128,
22133
],
[
22146,
22151
],
[
22236,
22241
]
],
[
[
22110,
22116
],
[
22215,
22221
],
[
22306,
22312
]
],
[
[
22420,
22427
],
[
22531,
22538
]
],
[
[
22507,
22515
],
[
22691,
22699
],
[
22796,
22804
]
],
[
[
22517,
22527
],
[
22576,
22586
]
],
[
[
22556,
22561
],
[
22614,
22619
],
[
22632,
22637
],
[
22736,
22741
]
],
[
[
22596,
22602
],
[
22701,
22707
],
[
22806,
22812
]
],
[
[
22961,
22979
],
[
24130,
24148
]
],
[
[
23912,
23923
]
]
] |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from setuptools import setup, find_packages
import os
import re
def get_version(package):
"""
Return package version as listed in `__version__` in `__init__.py`.
"""
init_py = open(os.path.join(package, '__init__.py')).read()
return re.match("__version__ = ['\"]([^'\"]+)['\"]", init_py).group(1)
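# For example (hypothetical contents): if oauth_api/__init__.py contains the line
# __version__ = '0.1.0', then get_version('oauth_api') returns '0.1.0'.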
version = get_version('oauth_api')
setup(
name="django-oauth-api",
version=version,
description="OAuth API for Django using Django Rest Framework",
classifiers=[
"Development Status :: 4 - Beta",
"Environment :: Web Environment",
"Framework :: Django",
"License :: OSI Approved :: BSD License",
"Operating System :: OS Independent",
"Programming Language :: Python :: 2.7",
"Programming Language :: Python :: 3.6",
"Topic :: Software Development :: Libraries :: Python Modules",
],
keywords='django djangorestframework oauth oauth2 oauthlib',
author='Tomi Pajunen',
author_email='tomi@madlab.fi',
url='https://github.com/eofs/django-oauth-api',
license='BSD',
packages=find_packages(),
include_package_data=True,
test_suite='runtests',
install_requires=[
'django>=1.11',
'oauthlib==2.0.7',
],
zip_safe=False,
)
| [
[
[
70,
75
],
[
401,
406
]
],
[
[
77,
90
],
[
1143,
1156
]
],
[
[
98,
100
],
[
242,
244
]
],
[
[
108,
110
],
[
298,
300
]
],
[
[
117,
128
],
[
374,
385
]
],
[
[
364,
371
],
[
449,
456
]
]
] |
from django.contrib import admin
from events.models import Place, Event, Attendance
# Register your models here.
class EventAdmin(admin.ModelAdmin):
filter_horizontal = ('expected_members', )
class AttendanceAdmin(admin.ModelAdmin):
list_display = ('event__name', 'member', 'attendance', 'proxy_to', 'accepted',)
list_filter = ('event__name',)
def event__name(self, obj):
return str(obj.event)
admin.site.register(Place)
admin.site.register(Attendance, AttendanceAdmin)
admin.site.register(Event, EventAdmin)
| [
[
[
27,
32
],
[
131,
136
],
[
220,
225
],
[
422,
427
],
[
449,
454
],
[
498,
503
]
],
[
[
59,
64
],
[
442,
447
]
],
[
[
66,
71
],
[
518,
523
]
],
[
[
73,
83
],
[
469,
479
]
],
[
[
120,
130
],
[
525,
535
]
],
[
[
204,
219
],
[
481,
496
]
]
] |
# -*- coding: utf-8 -*-
# Created at 03/09/2020
__author__ = 'raniys'
import math
import pytest
from factorial_example import factorial_function
@pytest.mark.sample
def test_factorial_functionality():
print("Inside test_factorial_functionality")
assert factorial_function(0) == 1
assert factorial_function(4) == 24
@pytest.mark.sample
def test_standard_library():
print("Inside test_standard_library")
for i in range(5):
# verify whether factorial is calculated correctly
# by checking the result against the standard
# library - math.factorial()
assert math.factorial(i) == factorial_function(i)
@pytest.mark.sample
def test_negative_number():
print("Inside test_negative_number")
# This test case would pass if Assertion Error
# is raised. In this case, the input number is negative
# hence, the test case passes
with pytest.raises(AssertionError):
factorial_function(-10)
| [
[
[
49,
59
]
],
[
[
79,
83
],
[
617,
621
]
],
[
[
92,
98
],
[
152,
158
],
[
337,
343
],
[
663,
669
],
[
906,
912
]
],
[
[
130,
148
],
[
268,
286
],
[
306,
324
],
[
638,
656
],
[
945,
963
]
],
[
[
175,
203
]
],
[
[
360,
381
]
],
[
[
686,
706
]
]
] |
import requests
import json
# Get Current Patch
def getCurrentVersion():
versionResponse = requests.get("https://ddragon.leagueoflegends.com/api/versions.json")
version_patch_RawData = versionResponse.json()
currentVersion = version_patch_RawData[0]
print(currentVersion)
return currentVersion
#champions, items, summoner_spells, spells
def GetDDragonData_Champions():
version = getCurrentVersion()
#Champions Data
response = requests.get("http://ddragon.leagueoflegends.com/cdn/"+version+"/data/en_US/champion.json")
allChampionRawData = json.loads(response.text)
ChampionIdToName = {}
for key,champion in allChampionRawData['data'].items():
ChampionIdToName[int(champion['key'])] = champion['name']
print(ChampionIdToName)
return ChampionIdToName
def GetDDragonData_Items():
version = getCurrentVersion()
response = requests.get("http://ddragon.leagueoflegends.com/cdn/"+version+"/data/en_US/item.json")
allItemsRawData = json.loads(response.text)
QuickPrinter(allItemsRawData)
#Items Data
ItemIdToName = {}
for key,item in allItemsRawData['data'].items():
ItemIdToName[int(key)] = item['name']
    print(ItemIdToName)
return ItemIdToName
def QuickPrinter(String_to_Print):
print(json.dumps(String_to_Print, indent=4, sort_keys=True))
#main()
version = getCurrentVersion()
GetDDragonData_Champions()
GetDDragonData_Items() | [
[
[
7,
15
],
[
95,
103
],
[
459,
467
],
[
889,
897
]
],
[
[
23,
27
],
[
576,
580
],
[
999,
1003
],
[
1291,
1295
]
],
[
[
52,
69
],
[
1365,
1382
],
[
404,
421
],
[
854,
871
]
],
[
[
362,
386
],
[
1385,
1409
]
],
[
[
816,
836
],
[
1412,
1432
]
],
[
[
1250,
1262
],
[
1029,
1041
]
],
[
[
1355,
1362
]
]
] |
import numpy as np
import nibabel as nib
import os
import sys
sys.path.append(os.path.dirname(os.path.abspath(__file__)) + '/../../')
from my_functions.matrix_stuff import *
def manual_rigid_body(fname = 'example_brain.nii.gz',
outmat = 'transformation.mat',
outimg = 'example_brain_transformed.nii.gz',
theta = np.radians([0,0,0]),
translation_vec = [0,0,0],
type = 'rotation',
flip_coordinates = [True, False, False]):
"""
Function to perform a rigid body transformation based on manually determined parameters.
Args:
- fname (str): filepath to input nifti image (.nii.gz)
- outmat (str): filepath of output 4x4 transformation matrix (.mat)
- outimg (str): filepath of transformed output image (.nii.gz)
- theta (np.array): vector of rotation angles in x,y,z dimension (in radians)
- translation_vec (np.array): vector for translation in x,y,z (in image coordinates)
- type (str): can be 'rotation' or 'translation' or 'rotation_translation'
- flip_coordinates (boolean vector): indicates for which axis the sign of the offset needs to be flipped
Returns:
- M (np.array): output 4x4 transformation matrix
- M is written to outmat
- the output image (outimg) is written out
Note on flip_coordinates:
Voxel coordinates in the image are expected to increase in the following directions
(it's similar to determining the reorient-command):
- first dimension: left -> right
- second dimension: posterior -> anterior
- third dimension: inferior -> superior
if they go the other way, change input variable accordingly, e.g.:
flip_coordinates = [True, False, False]
"""
# get sform from image to determine offset of coordinate-system
img = nib.load(fname)
aff = img.get_affine()
offset = aff[0:3,3]
# which type of manipulation is requested
if type == 'rotation':
print('do rotation only')
M = rotation(theta, offset, flip_coordinates)
elif type == 'translation':
print('do translation only')
M = vector_to_translation_matrix(translation_vec)
elif type == 'rotation_translation':
print('do combined rotation and translation')
M = rotation_translation(theta, translation_vec, offset, flip_coordinates)
# save output matrix
print('output matrix: ', M)
print('save in: ', outmat)
save_matrix4x4(M, outmat)
# apply transformation to input image
applywarp_command = "applywarp -i " + fname + " -r " + fname + " --premat=" + outmat + " --interp=nn -o " + outimg
    print('run applywarp: ', applywarp_command)
os.system(applywarp_command)
return M
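def _example_rotation():
    # Hedged usage sketch; the file names below are placeholders and FSL's applywarp
    # must be available on the PATH for the resampling step at the end to succeed.
    return manual_rigid_body(fname='brain.nii.gz',
                             outmat='rot_x10.mat',
                             outimg='brain_rot_x10.nii.gz',
                             theta=np.radians([10, 0, 0]),
                             type='rotation')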
| [
[
[
7,
18
],
[
376,
378
]
],
[
[
26,
40
],
[
1933,
1936
]
],
[
[
48,
50
],
[
78,
80
],
[
94,
96
],
[
2796,
2798
]
],
[
[
58,
61
],
[
62,
65
]
],
[
[
172,
173
],
[
2120,
2128
],
[
2243,
2271
],
[
2396,
2416
],
[
2560,
2574
]
],
[
[
179,
196
]
]
] |
from sys import maxsize
class Contact:
def __init__(self, first_name=None, middle_name=None, last_name=None, nickname=None,
photo_path=None, photo_delete=False, title=None, company=None, address=None,
telephones_all=None, telephone_home=None,
telephone_mobile=None, telephone_work=None, telephone_fax=None, emails_all=None,
email=None, email2=None, email3=None,
homepage=None, birthday_day=None, birthday_month=None, birthday_year=None, anniversary_day=None,
anniversary_month=None, anniversary_year=None, group=None, secondary_address=None,
secondary_telephone_home=None, secondary_notes=None, id_contact=None):
self.first_name = first_name
self.middle_name = middle_name
self.last_name = last_name
self.nickname = nickname
self.photo_path = photo_path
self.photo_delete = photo_delete
self.title = title
self.company = company
self.address = address
self.telephones_all = telephones_all
self.telephone_home = telephone_home
self.telephone_mobile = telephone_mobile
self.telephone_work = telephone_work
self.telephone_fax = telephone_fax
self.emails_all = emails_all
self.email = email
self.email2 = email2
self.email3 = email3
self.homepage = homepage
self.birthday_day = birthday_day
self.birthday_month = birthday_month
self.birthday_year = birthday_year
self.anniversary_day = anniversary_day
self.anniversary_month = anniversary_month
self.anniversary_year = anniversary_year
self.group = group
self.secondary_address = secondary_address
self.secondary_telephone_home = secondary_telephone_home
self.secondary_notes = secondary_notes
self.id = id_contact
def __repr__(self):
return "%s: %s %s, %s" % (self.id, self.first_name, self.last_name, self.address)
def __eq__(self, other):
return (self.id is None or other.id is None or self.id == other.id) and \
self.check_for_none(self.first_name, other.first_name) and \
self.check_for_none(self.last_name, other.last_name) and \
self.check_for_none(self.address, other.address)
def id_or_max(self):
if self.id:
return int(self.id)
else:
return maxsize
def check_for_none(self, first, second):
return first == second or (first is None and second == "") or (first == "" and second is None)
| [
[
[
16,
23
],
[
2479,
2486
]
],
[
[
32,
39
]
]
] |
import os
from sqlalchemy import create_engine
from sqlalchemy.orm import sessionmaker
from .base import Base
class MapperBase():
user = os.getenv("MYSQL_USER")
key = os.getenv("MYSQL_KEY")
host = os.getenv("MYSQL_HOST")
port = os.getenv("MYSQL_PORT")
def __init__(self, database):
self.db = database
if database == 'test':
self.url = 'sqlite:///:memory:'
else:
self.url = \
'mysql+mysqlconnector://{}:{}@{}:{}/{}'.format(
self.user,
self.key,
self.host,
self.port,
self.db,
)
self.engine = create_engine(
self.url,
connect_args={'use_pure': True}
)
self.session = sessionmaker(bind=self.engine)
self.base = Base
def get_base(self):
return self.base
def get_engine(self):
return self.engine
def get_session(self):
return self.session()
| [
[
[
7,
9
],
[
145,
147
],
[
179,
181
],
[
213,
215
],
[
248,
250
]
],
[
[
34,
47
],
[
705,
718
]
],
[
[
75,
87
],
[
819,
831
]
],
[
[
107,
111
],
[
870,
874
]
],
[
[
120,
130
]
]
] |
import numpy as np
import matplotlib.pyplot as plt
import argparse
def extract_name(word: str):
return word.split('=')[-1]
def extract_info(filename: str):
filename_splitted = filename.split('_')
assert len(filename_splitted) == 7
p = float(extract_name(filename_splitted[1]))
iterations = int(extract_name(filename_splitted[2]))
size = int(extract_name(filename_splitted[3]))
G = int(extract_name(filename_splitted[4]))
return p, iterations, size, G
def load_metrics(filename: str) -> list:
with open(filename, 'r') as f:
return [float(line.strip()) for line in f]
def plot_metrics(filename: str, metrics: list, output_path: str = None):
p, iterations, size, G = extract_info(filename)
x = np.linspace(0, iterations, len(metrics))
plt.rc('text', usetex=True)
plt.rc('font', family='serif')
plt.figure(figsize=(8, 5))
plt.grid(True, alpha=0.3)
plt.plot(x, metrics, label=f'p = {p}, N = {size}, G = {G}')
plt.ylabel(r'$\rho$', fontsize=14)
plt.xlabel('$t$', fontsize=14)
plt.xticks(fontsize=14)
plt.yticks(fontsize=14)
plt.legend(fontsize=13)
if output_path is not None:
plt.savefig(output_path, bbox_inches='tight')
else:
plt.show()
def main():
parser = argparse.ArgumentParser(description='Plot positive edge density (rho)')
parser.add_argument('--metrics-file', type=str, required=True, help='Path to calculated positive edge density')
parser.add_argument('--output-figure', type=str, required=False, default=None, help='Where to save output figure')
args = parser.parse_args()
metrics = load_metrics(args.metrics_file)
plot_metrics(args.metrics_file, metrics, args.output_figure)
if __name__ == '__main__':
main()
| [
[
[
7,
18
],
[
751,
753
]
],
[
[
26,
50
],
[
797,
800
],
[
829,
832
],
[
864,
867
],
[
895,
898
],
[
925,
928
],
[
989,
992
],
[
1028,
1031
],
[
1063,
1066
],
[
1091,
1094
],
[
1119,
1122
],
[
1183,
1186
],
[
1247,
1250
]
],
[
[
58,
66
],
[
1285,
1293
]
],
[
[
73,
85
],
[
261,
273
],
[
318,
330
],
[
369,
381
],
[
417,
429
]
],
[
[
135,
147
],
[
720,
732
]
],
[
[
493,
505
],
[
1637,
1649
]
],
[
[
622,
634
],
[
1673,
1685
]
],
[
[
1264,
1268
],
[
1767,
1771
]
]
] |
from decimal import Decimal
from urllib.parse import urlparse
from django import forms
from django.conf import settings
from django.core.exceptions import ValidationError
from django.db.models import Q
from django.utils.safestring import mark_safe
from django.utils.translation import gettext_lazy as _, pgettext_lazy
from django_scopes.forms import SafeModelMultipleChoiceField
from pretix.api.models import WebHook
from pretix.api.webhooks import get_all_webhook_events
from pretix.base.forms import I18nModelForm, SettingsForm
from pretix.base.forms.widgets import SplitDateTimePickerWidget
from pretix.base.models import (
Device, EventMetaProperty, Gate, GiftCard, Organizer, Team,
)
from pretix.control.forms import ExtFileField, SplitDateTimeField
from pretix.control.forms.event import SafeEventMultipleChoiceField
from pretix.multidomain.models import KnownDomain
class OrganizerForm(I18nModelForm):
error_messages = {
'duplicate_slug': _("This slug is already in use. Please choose a different one."),
}
class Meta:
model = Organizer
fields = ['name', 'slug']
def clean_slug(self):
slug = self.cleaned_data['slug']
if Organizer.objects.filter(slug__iexact=slug).exists():
raise forms.ValidationError(
self.error_messages['duplicate_slug'],
code='duplicate_slug',
)
return slug
class OrganizerDeleteForm(forms.Form):
error_messages = {
'slug_wrong': _("The slug you entered was not correct."),
}
slug = forms.CharField(
max_length=255,
label=_("Event slug"),
)
def __init__(self, *args, **kwargs):
self.organizer = kwargs.pop('organizer')
super().__init__(*args, **kwargs)
def clean_slug(self):
slug = self.cleaned_data.get('slug')
if slug != self.organizer.slug:
raise forms.ValidationError(
self.error_messages['slug_wrong'],
code='slug_wrong',
)
return slug
class OrganizerUpdateForm(OrganizerForm):
def __init__(self, *args, **kwargs):
self.domain = kwargs.pop('domain', False)
self.change_slug = kwargs.pop('change_slug', False)
kwargs.setdefault('initial', {})
self.instance = kwargs['instance']
if self.domain and self.instance:
initial_domain = self.instance.domains.first()
if initial_domain:
kwargs['initial'].setdefault('domain', initial_domain.domainname)
super().__init__(*args, **kwargs)
if not self.change_slug:
self.fields['slug'].widget.attrs['readonly'] = 'readonly'
if self.domain:
self.fields['domain'] = forms.CharField(
max_length=255,
label=_('Custom domain'),
required=False,
help_text=_('You need to configure the custom domain in the webserver beforehand.')
)
def clean_domain(self):
d = self.cleaned_data['domain']
if d:
if d == urlparse(settings.SITE_URL).hostname:
raise ValidationError(
_('You cannot choose the base domain of this installation.')
)
if KnownDomain.objects.filter(domainname=d).exclude(organizer=self.instance.pk,
event__isnull=True).exists():
raise ValidationError(
_('This domain is already in use for a different event or organizer.')
)
return d
def clean_slug(self):
if self.change_slug:
return self.cleaned_data['slug']
return self.instance.slug
def save(self, commit=True):
instance = super().save(commit)
if self.domain:
current_domain = instance.domains.first()
if self.cleaned_data['domain']:
if current_domain and current_domain.domainname != self.cleaned_data['domain']:
current_domain.delete()
KnownDomain.objects.create(organizer=instance, domainname=self.cleaned_data['domain'])
elif not current_domain:
KnownDomain.objects.create(organizer=instance, domainname=self.cleaned_data['domain'])
elif current_domain:
current_domain.delete()
instance.cache.clear()
for ev in instance.events.all():
ev.cache.clear()
return instance
class EventMetaPropertyForm(forms.ModelForm):
class Meta:
model = EventMetaProperty
fields = ['name', 'default', 'required', 'protected', 'allowed_values']
widgets = {
'default': forms.TextInput()
}
class TeamForm(forms.ModelForm):
def __init__(self, *args, **kwargs):
organizer = kwargs.pop('organizer')
super().__init__(*args, **kwargs)
self.fields['limit_events'].queryset = organizer.events.all().order_by(
'-has_subevents', '-date_from'
)
class Meta:
model = Team
fields = ['name', 'all_events', 'limit_events', 'can_create_events',
'can_change_teams', 'can_change_organizer_settings',
'can_manage_gift_cards',
'can_change_event_settings', 'can_change_items',
'can_view_orders', 'can_change_orders',
'can_view_vouchers', 'can_change_vouchers']
widgets = {
'limit_events': forms.CheckboxSelectMultiple(attrs={
'data-inverse-dependency': '#id_all_events',
'class': 'scrolling-multiple-choice scrolling-multiple-choice-large',
}),
}
field_classes = {
'limit_events': SafeEventMultipleChoiceField
}
def clean(self):
data = super().clean()
if self.instance.pk and not data['can_change_teams']:
if not self.instance.organizer.teams.exclude(pk=self.instance.pk).filter(
can_change_teams=True, members__isnull=False
).exists():
raise ValidationError(_('The changes could not be saved because there would be no remaining team with '
'the permission to change teams and permissions.'))
return data
class GateForm(forms.ModelForm):
def __init__(self, *args, **kwargs):
kwargs.pop('organizer')
super().__init__(*args, **kwargs)
class Meta:
model = Gate
fields = ['name', 'identifier']
class DeviceForm(forms.ModelForm):
def __init__(self, *args, **kwargs):
organizer = kwargs.pop('organizer')
super().__init__(*args, **kwargs)
self.fields['limit_events'].queryset = organizer.events.all().order_by(
'-has_subevents', '-date_from'
)
self.fields['gate'].queryset = organizer.gates.all()
def clean(self):
d = super().clean()
if not d['all_events'] and not d['limit_events']:
raise ValidationError(_('Your device will not have access to anything, please select some events.'))
return d
class Meta:
model = Device
fields = ['name', 'all_events', 'limit_events', 'security_profile', 'gate']
widgets = {
'limit_events': forms.CheckboxSelectMultiple(attrs={
'data-inverse-dependency': '#id_all_events',
'class': 'scrolling-multiple-choice scrolling-multiple-choice-large',
}),
}
field_classes = {
'limit_events': SafeEventMultipleChoiceField
}
class OrganizerSettingsForm(SettingsForm):
auto_fields = [
'contact_mail',
'imprint_url',
'organizer_info_text',
'event_list_type',
'event_list_availability',
'organizer_homepage_text',
'organizer_link_back',
'organizer_logo_image_large',
'giftcard_length',
'giftcard_expiry_years',
'locales',
'region',
'event_team_provisioning',
'primary_color',
'theme_color_success',
'theme_color_danger',
'theme_color_background',
'theme_round_borders',
'primary_font'
]
organizer_logo_image = ExtFileField(
label=_('Header image'),
ext_whitelist=(".png", ".jpg", ".gif", ".jpeg"),
max_size=10 * 1024 * 1024,
required=False,
help_text=_('If you provide a logo image, we will by default not show your organization name '
'in the page header. By default, we show your logo with a size of up to 1140x120 pixels. You '
'can increase the size with the setting below. We recommend not using small details on the picture '
'as it will be resized on smaller screens.')
)
favicon = ExtFileField(
label=_('Favicon'),
ext_whitelist=(".ico", ".png", ".jpg", ".gif", ".jpeg"),
required=False,
max_size=1 * 1024 * 1024,
help_text=_('If you provide a favicon, we will show it instead of the default pretix icon. '
'We recommend a size of at least 200x200px to accommodate most devices.')
)
class WebHookForm(forms.ModelForm):
events = forms.MultipleChoiceField(
widget=forms.CheckboxSelectMultiple,
label=pgettext_lazy('webhooks', 'Event types')
)
def __init__(self, *args, **kwargs):
organizer = kwargs.pop('organizer')
super().__init__(*args, **kwargs)
self.fields['limit_events'].queryset = organizer.events.all()
self.fields['events'].choices = [
(
a.action_type,
mark_safe('{} – <code>{}</code>'.format(a.verbose_name, a.action_type))
) for a in get_all_webhook_events().values()
]
if self.instance:
self.fields['events'].initial = list(self.instance.listeners.values_list('action_type', flat=True))
class Meta:
model = WebHook
fields = ['target_url', 'enabled', 'all_events', 'limit_events']
widgets = {
'limit_events': forms.CheckboxSelectMultiple(attrs={
'data-inverse-dependency': '#id_all_events'
}),
}
field_classes = {
'limit_events': SafeModelMultipleChoiceField
}
class GiftCardCreateForm(forms.ModelForm):
value = forms.DecimalField(
label=_('Gift card value'),
min_value=Decimal('0.00')
)
def __init__(self, *args, **kwargs):
self.organizer = kwargs.pop('organizer')
initial = kwargs.pop('initial', {})
initial['expires'] = self.organizer.default_gift_card_expiry
kwargs['initial'] = initial
super().__init__(*args, **kwargs)
def clean_secret(self):
s = self.cleaned_data['secret']
if GiftCard.objects.filter(
secret__iexact=s
).filter(
Q(issuer=self.organizer) | Q(issuer__gift_card_collector_acceptance__collector=self.organizer)
).exists():
raise ValidationError(
_('A gift card with the same secret already exists in your or an affiliated organizer account.')
)
return s
class Meta:
model = GiftCard
fields = ['secret', 'currency', 'testmode', 'expires', 'conditions']
field_classes = {
'expires': SplitDateTimeField
}
widgets = {
'expires': SplitDateTimePickerWidget,
'conditions': forms.Textarea(attrs={"rows": 2})
}
class GiftCardUpdateForm(forms.ModelForm):
class Meta:
model = GiftCard
fields = ['expires', 'conditions']
field_classes = {
'expires': SplitDateTimeField
}
widgets = {
'expires': SplitDateTimePickerWidget,
'conditions': forms.Textarea(attrs={"rows": 2})
}
| [
[
[
20,
27
],
[
10564,
10571
]
],
[
[
53,
61
],
[
3088,
3096
]
],
[
[
82,
87
],
[
1445,
1450
],
[
1564,
1569
],
[
4583,
4588
],
[
4774,
4779
],
[
4819,
4824
],
[
5562,
5567
],
[
6405,
6410
],
[
6636,
6641
],
[
7387,
7392
],
[
9313,
9318
],
[
9344,
9349
],
[
9386,
9391
],
[
10217,
10222
],
[
10460,
10465
],
[
10490,
10495
],
[
11623,
11628
],
[
11694,
11699
],
[
11970,
11975
],
[
1266,
1271
],
[
1905,
1910
],
[
2748,
2753
]
],
[
[
112,
120
],
[
3097,
3105
]
],
[
[
156,
171
],
[
3148,
3163
],
[
3472,
3487
],
[
6177,
6192
],
[
7102,
7117
],
[
11169,
11184
]
],
[
[
201,
202
],
[
11036,
11037
],
[
11063,
11064
]
],
[
[
239,
248
],
[
9778,
9787
]
],
[
[
286,
303
],
[
966,
967
],
[
1503,
1504
],
[
1619,
1620
],
[
8368,
8369
],
[
8521,
8522
],
[
8955,
8956
],
[
9110,
9111
],
[
10524,
10525
],
[
2819,
2820
],
[
2897,
2898
],
[
3185,
3186
],
[
3509,
3510
],
[
6193,
6194
],
[
7118,
7119
],
[
11202,
11203
]
],
[
[
305,
318
],
[
9430,
9443
]
],
[
[
351,
379
],
[
10394,
10422
]
],
[
[
411,
418
],
[
10088,
10095
]
],
[
[
451,
473
],
[
9873,
9895
]
],
[
[
504,
517
],
[
901,
914
]
],
[
[
519,
531
],
[
7720,
7732
]
],
[
[
570,
595
],
[
11570,
11595
],
[
11917,
11942
]
],
[
[
633,
639
],
[
7248,
7254
]
],
[
[
641,
658
],
[
4633,
4650
]
],
[
[
660,
664
],
[
6572,
6576
]
],
[
[
666,
674
],
[
11363,
11371
],
[
11744,
11752
],
[
10948,
10956
]
],
[
[
676,
685
],
[
1071,
1080
],
[
1194,
1203
]
],
[
[
687,
691
],
[
5131,
5135
]
],
[
[
728,
740
],
[
8340,
8352
],
[
8927,
8939
]
],
[
[
742,
760
],
[
11498,
11516
],
[
11845,
11863
]
],
[
[
800,
828
],
[
5826,
5854
],
[
7651,
7679
]
],
[
[
867,
878
],
[
3279,
3290
],
[
4107,
4118
],
[
4255,
4266
]
],
[
[
887,
900
],
[
2076,
2089
]
],
[
[
1425,
1444
]
],
[
[
2056,
2075
]
],
[
[
4561,
4582
]
],
[
[
4810,
4818
]
],
[
[
6396,
6404
]
],
[
[
6625,
6635
]
],
[
[
7698,
7719
]
],
[
[
9301,
9312
]
],
[
[
10441,
10459
]
],
[
[
11675,
11693
]
]
] |
from setuptools import setup
from setuptools import find_packages
with open('README.rst') as f:
LONG_DESCRIPTION = f.read()
MAJOR_VERSION = '0'
MINOR_VERSION = '11'
MICRO_VERSION = '214'
VERSION = "{}.{}.{}".format(MAJOR_VERSION, MINOR_VERSION, MICRO_VERSION)
setup(name='yagmail',
version=VERSION,
description='Yet Another GMAIL client',
long_description=LONG_DESCRIPTION,
url='https://github.com/kootenpv/yagmail',
author='Pascal van Kooten',
author_email='kootenpv@gmail.com',
license='MIT',
extras_require={
"all": ["keyring"]
},
keywords='email mime automatic html attachment',
entry_points={
'console_scripts': ['yagmail = yagmail.__main__:main']
},
classifiers=[
'Environment :: Console',
'Intended Audience :: Developers',
'Intended Audience :: Customer Service',
'Intended Audience :: System Administrators',
'License :: OSI Approved :: MIT License',
'Operating System :: Microsoft',
'Operating System :: MacOS :: MacOS X',
'Operating System :: Unix',
'Operating System :: POSIX',
'Programming Language :: Python',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Topic :: Communications :: Email',
'Topic :: Communications :: Email :: Email Clients (MUA)',
'Topic :: Software Development',
'Topic :: Software Development :: Build Tools',
'Topic :: Software Development :: Debuggers',
'Topic :: Software Development :: Libraries',
'Topic :: Software Development :: Libraries :: Python Modules',
'Topic :: System :: Software Distribution',
'Topic :: System :: Systems Administration',
'Topic :: Utilities'
],
packages=find_packages(),
zip_safe=False,
platforms='any')
| [
[
[
23,
28
],
[
266,
271
]
],
[
[
52,
65
],
[
2047,
2060
]
],
[
[
94,
95
],
[
120,
121
]
],
[
[
101,
117
],
[
380,
396
]
],
[
[
129,
142
],
[
220,
233
]
],
[
[
149,
162
],
[
235,
248
]
],
[
[
170,
183
],
[
250,
263
]
],
[
[
192,
199
],
[
302,
309
]
]
] |
from jinja2 import Template
from starlette.applications import Starlette
from starlette.middleware import Middleware
from starlette.middleware.sessions import SessionMiddleware
from starlette.responses import PlainTextResponse, HTMLResponse
from starlette_wtf import StarletteForm, CSRFProtectMiddleware, csrf_protect
from wtforms import StringField
from wtforms.validators import DataRequired
class MyForm(StarletteForm):
name = StringField('name', validators=[DataRequired()])
template = Template('''
<html>
<body>
<form method="post" novalidate>
{{ form.csrf_token }}
<div>
{{ form.name(placeholder='Name') }}
{% if form.name.errors -%}
<span>{{ form.name.errors[0] }}</span>
{%- endif %}
</div>
<button type="submit">Submit</button>
</form>
</body>
</html>
''')
app = Starlette(middleware=[
Middleware(SessionMiddleware, secret_key='***REPLACEME1***'),
Middleware(CSRFProtectMiddleware, csrf_secret='***REPLACEME2***')
])
@app.route('/', methods=['GET', 'POST'])
@csrf_protect
async def index(request):
"""GET|POST /: form handler
"""
form = await MyForm.from_formdata(request)
if form.validate_on_submit():
return PlainTextResponse('SUCCESS')
html = template.render(form=form)
return HTMLResponse(html)
| [
[
[
19,
27
],
[
498,
506
]
],
[
[
63,
72
],
[
850,
859
]
],
[
[
106,
116
],
[
877,
887
],
[
943,
953
]
],
[
[
159,
176
],
[
888,
905
]
],
[
[
209,
226
],
[
1236,
1253
]
],
[
[
228,
240
],
[
1315,
1327
]
],
[
[
267,
280
],
[
409,
422
]
],
[
[
282,
303
],
[
954,
975
]
],
[
[
305,
317
],
[
1056,
1068
]
],
[
[
338,
349
],
[
436,
447
]
],
[
[
381,
393
],
[
468,
480
]
],
[
[
402,
408
],
[
1152,
1158
]
],
[
[
487,
495
],
[
1277,
1285
]
],
[
[
844,
847
],
[
1015,
1018
]
],
[
[
1069,
1333
]
]
] |
# import the libraries
from PyQt5.QtCore import Qt
from PyQt5.QtWidgets import QApplication, QWidget, QPushButton, QLabel,QVBoxLayout,QHBoxLayout, QMessageBox, QRadioButton
# create the application and the main window
app=QApplication([])
main_win =QWidget()
main_win.setWindowTitle('Конкурс от Crazy People')
question =QLabel("В каком году канал получил золотую кнопку от YouTube?")
btn_answer1 =QRadioButton('2005')
btn_answer2 =QRadioButton('2010')
btn_answer3 =QRadioButton('2015')
btn_answer4 =QRadioButton('2020')
layout_main=QVBoxLayout()
h1=QHBoxLayout()
h2=QHBoxLayout()
h3=QHBoxLayout()
h1.addWidget(question,alignment =Qt.AlignCenter)
h2.addWidget(btn_answer1,alignment =Qt.AlignCenter)
h2.addWidget(btn_answer2,alignment =Qt.AlignCenter)
h3.addWidget(btn_answer3,alignment =Qt.AlignCenter)
h3.addWidget(btn_answer4,alignment =Qt.AlignCenter)
layout_main.addLayout(h1)
layout_main.addLayout(h2)
layout_main.addLayout(h3)
main_win.setLayout(layout_main)
def win ():
win =QMessageBox()
win.setText('Верно!')
win.exec_()
def lose():
lose =QMessageBox()
lose.setText('«Нет, в 2015 году. Вы выиграли фирменный плакат')
lose.exec_()
btn_answer1.clicked.connect(lose)
btn_answer2.clicked.connect(lose)
btn_answer3.clicked.connect(win)
btn_answer4.clicked.connect(lose)
main_win.show()
app.exec_()
| [
[
[
48,
50
],
[
623,
625
],
[
675,
677
],
[
727,
729
],
[
779,
781
],
[
831,
833
]
],
[
[
79,
91
],
[
215,
227
]
],
[
[
93,
100
],
[
242,
249
]
],
[
[
102,
113
]
],
[
[
115,
121
],
[
313,
319
]
],
[
[
122,
133
],
[
525,
536
]
],
[
[
134,
145
],
[
542,
553
],
[
559,
570
],
[
576,
587
]
],
[
[
147,
158
],
[
979,
990
],
[
1057,
1068
]
],
[
[
160,
172
],
[
390,
402
],
[
424,
436
],
[
458,
470
],
[
492,
504
]
],
[
[
211,
214
],
[
1323,
1326
]
],
[
[
232,
240
],
[
252,
260
],
[
925,
933
],
[
1307,
1315
]
],
[
[
303,
311
],
[
603,
611
]
],
[
[
377,
388
],
[
652,
663
],
[
1156,
1167
]
],
[
[
411,
422
],
[
704,
715
],
[
1193,
1204
]
],
[
[
445,
456
],
[
756,
767
],
[
1227,
1238
]
],
[
[
479,
490
],
[
808,
819
],
[
1260,
1271
]
],
[
[
513,
524
],
[
847,
858
],
[
873,
884
],
[
899,
910
],
[
944,
955
]
],
[
[
539,
541
],
[
590,
592
],
[
869,
871
]
],
[
[
556,
558
],
[
639,
641
],
[
691,
693
],
[
895,
897
]
],
[
[
573,
575
],
[
743,
745
],
[
795,
797
],
[
921,
923
]
],
[
[
962,
965
],
[
1255,
1258
]
],
[
[
1039,
1043
],
[
1184,
1188
],
[
1221,
1225
],
[
1288,
1292
]
]
] |
from action_class import Action
place = 'place'
upgrade = 'upgrade'
target = 'target'
top = 'upgrade 1'
middle = 'upgrade 2'
bottom = 'upgrade 3'
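# scripted build order: tower placements, targeting changes and upgrades, executed in sequence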
ouch_script = [
Action(place, name='sub1', action='sub', position=(708, 540)), # Sub
Action(place, name='sub2', action='sub', position=(984, 545)), # Sub2
Action('start', action='start', cost=0),
Action(place, name='dart1', action='dart', position=(303, 671)), # Dart
Action(place, name='Psi', action='Hero', position=(546, 309)), # Psi
Action(target, name = 'Psi', action='Strong'), # Psi Strong
Action(upgrade, name='sub1', action=bottom), # 001
Action(upgrade, name='sub2', action=bottom), # 001
Action(upgrade, name='sub1', action=middle), # 011
Action(upgrade, name='sub2', action=top), # 101
Action(upgrade, name='sub1', action=middle), # 021
Action(upgrade, name='sub2', action=top), # 201
Action(upgrade, name='sub1', action=bottom), # 022
Action(upgrade, name='sub2', action=bottom), # 202
Action(place, name='alch1', action='alch', position=(1009, 411)), # Alchemist
Action(target, name = 'alch1', action='Strong'), # Strong
Action(upgrade, name='alch1', action=top), # 100
Action(upgrade, name='alch1', action=top), # 200
Action(upgrade, name='sub2', action=bottom), # 203 Sub2
Action(place, name='ace1', action='ace', position=(845, 310)), # Ace
Action(upgrade, name='ace1', action= bottom), # 001
Action(upgrade, name='ace1', action=bottom), # 002
Action(upgrade, name='ace1', action=bottom), # 003
Action(place, name='village1', action='Village', position=(990, 295)), # Village
Action(upgrade, name='village1', action= middle), # 010
Action(upgrade, name='village1', action= middle), # 020
Action(upgrade, name='ace1', action=top), # 103 Ace
Action(upgrade, name='ace1', action=top), # 203
Action(upgrade, name='sub2', action=bottom), # 204 Sub2
Action(upgrade, name='sub1', action=middle), # 023 Sub2
Action(upgrade, name='alch1', action=top), # 300 Alch
Action(upgrade, name='alch1', action=top), # 400
Action(upgrade, name='alch1', action=bottom), # 401
Action(upgrade, name='ace1', action=bottom), # 204 Ace
Action(place, name='sniper1', action='sniper', position=(85, 676)), # Sniper
Action(upgrade, name='sniper1', action= top), # 100
Action(target, name = 'sniper1', action='Strong'),
Action(upgrade, name='sniper1', action=top), # 200
Action(upgrade, name='sniper1', action=top), # 300
Action(upgrade, name='sniper1', action=top), # 400
Action(upgrade, name='sniper1', action=bottom), # 401
Action(upgrade, name='sniper1', action=bottom), # 402
Action('finish', action='finish', cost=0)
] | [
[
[
25,
31
],
[
169,
175
],
[
242,
248
],
[
316,
322
],
[
362,
368
],
[
439,
445
],
[
512,
518
],
[
577,
583
],
[
632,
638
],
[
688,
694
],
[
743,
749
],
[
796,
802
],
[
851,
857
],
[
905,
911
],
[
960,
966
],
[
1016,
1022
],
[
1098,
1104
],
[
1160,
1166
],
[
1213,
1219
],
[
1267,
1273
],
[
1328,
1334
],
[
1402,
1408
],
[
1458,
1464
],
[
1513,
1519
],
[
1571,
1577
],
[
1656,
1662
],
[
1716,
1722
],
[
1777,
1783
],
[
1833,
1839
],
[
1886,
1892
],
[
1946,
1952
],
[
2007,
2013
],
[
2065,
2071
],
[
2119,
2125
],
[
2176,
2182
],
[
2240,
2246
],
[
2321,
2327
],
[
2377,
2383
],
[
2432,
2438
],
[
2488,
2494
],
[
2544,
2550
],
[
2600,
2606
],
[
2659,
2665
],
[
2719,
2725
]
],
[
[
33,
38
],
[
176,
181
],
[
249,
254
],
[
369,
374
],
[
446,
451
],
[
1023,
1028
],
[
1335,
1340
],
[
1578,
1583
],
[
2247,
2252
]
],
[
[
49,
56
],
[
584,
591
],
[
639,
646
],
[
695,
702
],
[
750,
757
],
[
803,
810
],
[
858,
865
],
[
912,
919
],
[
967,
974
],
[
1167,
1174
],
[
1220,
1227
],
[
1274,
1281
],
[
1409,
1416
],
[
1465,
1472
],
[
1520,
1527
],
[
1663,
1670
],
[
1723,
1730
],
[
1784,
1791
],
[
1840,
1847
],
[
1893,
1900
],
[
1953,
1960
],
[
2014,
2021
],
[
2072,
2079
],
[
2126,
2133
],
[
2183,
2190
],
[
2328,
2335
],
[
2439,
2446
],
[
2495,
2502
],
[
2551,
2558
],
[
2607,
2614
],
[
2666,
2673
]
],
[
[
69,
75
],
[
519,
525
],
[
1105,
1111
],
[
2384,
2390
]
],
[
[
88,
91
],
[
779,
782
],
[
887,
890
],
[
1197,
1200
],
[
1250,
1253
],
[
1813,
1816
],
[
1869,
1872
],
[
2044,
2047
],
[
2102,
2105
],
[
2361,
2364
],
[
2471,
2474
],
[
2527,
2530
],
[
2583,
2586
]
],
[
[
106,
112
],
[
724,
730
],
[
832,
838
],
[
1697,
1703
],
[
1757,
1763
],
[
1982,
1988
]
],
[
[
127,
133
],
[
613,
619
],
[
668,
674
],
[
941,
947
],
[
996,
1002
],
[
1303,
1309
],
[
1439,
1445
],
[
1494,
1500
],
[
1549,
1555
],
[
1922,
1928
],
[
2156,
2162
],
[
2212,
2218
],
[
2639,
2645
],
[
2698,
2704
]
],
[
[
149,
160
]
]
] |
def run():
my_list = [1, "Hello", True, 4.5]
my_dict = {"firstname":"Facundo", "lastname":"Garcia"}
superList = [
{"firstname":"Facundo", "lastname":"Garcia"},
{"firstname":"Miguel", "lastname":"Torres"},
{"firstname":"José", "lastname":"Rodelo"},
{"firstname":"Susana", "lastname":"Martinez"},
{"firstname":"Luis", "lastname":"Cruz"}
]
superDict = {
"naturalNums": [1,2,3,4,5],
"integerNums": [-1,-2,0,1,2],
"floatingNums": [1.1, 4.5, 6.43]
}
for k, v in superDict.items():
print(k, "-", v)
for innerDict in superList:
for k, v in innerDict.items():
print(k, "-", v)
if __name__ == '__main__':
run() | [
[
[
4,
7
],
[
732,
735
]
]
] |
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.colors as colors
from mpl_toolkits.axes_grid1 import make_axes_locatable
#Key:
# 1=sandstone 2=c_siltstone 3=f_siltstone
# 4=marine_silt_shale 5=mudstone 6=wackestone 7=dolomite
# 8=packstone 9=bafflestone
facies_labels = ['SS', 'CSiS', 'FSiS', 'SiSh', 'MS',
'WS', 'D','PS', 'BS']
facies_colors = ['#F4D03F', '#F5B041','#DC7633','#6E2C00', '#1B4F72','#2E86C1', '#AED6F1', '#A569BD', '#196F3D']
#facies_color_map is a dictionary that maps facies labels
#to their respective colors
facies_color_map = {}
for ind, label in enumerate(facies_labels):
facies_color_map[label] = facies_colors[ind]
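# helper: map a row's 1-indexed 'Facies' code to its text label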
def label_facies(row, labels):
return labels[ row['Facies'] -1]
def make_facies_log_plot(logs, facies_colors):
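    # draw the GR, ILD_log10, DeltaPHI, PHIND and PE logs next to a color-coded facies track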
#make sure logs are sorted by depth
logs = logs.sort_values(by='Depth')
cmap_facies = colors.ListedColormap(
facies_colors[0:len(facies_colors)], 'indexed')
ztop=logs.Depth.min(); zbot=logs.Depth.max()
cluster=np.repeat(np.expand_dims(logs['Facies'].values,1), 100, 1)
f, ax = plt.subplots(nrows=1, ncols=6, figsize=(8, 12))
ax[0].plot(logs.GR, logs.Depth, '-g')
ax[1].plot(logs.ILD_log10, logs.Depth, '-')
ax[2].plot(logs.DeltaPHI, logs.Depth, '-', color='0.5')
ax[3].plot(logs.PHIND, logs.Depth, '-', color='r')
ax[4].plot(logs.PE, logs.Depth, '-', color='black')
im=ax[5].imshow(cluster, interpolation='none', aspect='auto',
cmap=cmap_facies,vmin=1,vmax=9)
divider = make_axes_locatable(ax[5])
cax = divider.append_axes("right", size="20%", pad=0.05)
cbar=plt.colorbar(im, cax=cax)
cbar.set_label((17*' ').join([' SS ', 'CSiS', 'FSiS',
'SiSh', ' MS ', ' WS ', ' D ',
' PS ', ' BS ']))
cbar.set_ticks(range(0,1)); cbar.set_ticklabels('')
for i in range(len(ax)-1):
ax[i].set_ylim(ztop,zbot)
ax[i].invert_yaxis()
ax[i].grid()
ax[i].locator_params(axis='x', nbins=3)
ax[0].set_xlabel("GR")
ax[0].set_xlim(logs.GR.min(),logs.GR.max())
ax[1].set_xlabel("ILD_log10")
ax[1].set_xlim(logs.ILD_log10.min(),logs.ILD_log10.max())
ax[2].set_xlabel("DeltaPHI")
ax[2].set_xlim(logs.DeltaPHI.min(),logs.DeltaPHI.max())
ax[3].set_xlabel("PHIND")
ax[3].set_xlim(logs.PHIND.min(),logs.PHIND.max())
ax[4].set_xlabel("PE")
ax[4].set_xlim(logs.PE.min(),logs.PE.max())
ax[5].set_xlabel('Facies')
ax[1].set_yticklabels([]); ax[2].set_yticklabels([]); ax[3].set_yticklabels([])
ax[4].set_yticklabels([]); ax[5].set_yticklabels([])
ax[5].set_xticklabels([])
f.suptitle('Well: %s'%logs.iloc[0]['WellName'], fontsize=14,y=0.94)
def compare_facies_plot(logs, compadre, facies_colors):
"""plot the facies plot as a function of depth for both the prediction
and the actual lithofacies labels.
"""
#make sure logs are sorted by depth
logs = logs.sort_values(by='Depth')
cmap_facies = colors.ListedColormap(
facies_colors[0:len(facies_colors)], 'indexed')
ztop=logs.Depth.min(); zbot=logs.Depth.max()
cluster1 = np.repeat(np.expand_dims(logs['Facies'].values,1), 100, 1)
cluster2 = np.repeat(np.expand_dims(logs[compadre].values,1), 100, 1)
f, ax = plt.subplots(nrows=1, ncols=7, figsize=(9, 12))
ax[0].plot(logs.GR, logs.Depth, '-g')
ax[1].plot(logs.ILD_log10, logs.Depth, '-')
ax[2].plot(logs.DeltaPHI, logs.Depth, '-', color='0.5')
ax[3].plot(logs.PHIND, logs.Depth, '-', color='r')
ax[4].plot(logs.PE, logs.Depth, '-', color='black')
im1 = ax[5].imshow(cluster1, interpolation='none', aspect='auto',
cmap=cmap_facies,vmin=1,vmax=9)
im2 = ax[6].imshow(cluster2, interpolation='none', aspect='auto',
cmap=cmap_facies,vmin=1,vmax=9)
divider = make_axes_locatable(ax[6])
cax = divider.append_axes("right", size="20%", pad=0.05)
cbar=plt.colorbar(im2, cax=cax)
cbar.set_label((17*' ').join([' SS ', 'CSiS', 'FSiS',
'SiSh', ' MS ', ' WS ', ' D ',
' PS ', ' BS ']))
cbar.set_ticks(range(0,1)); cbar.set_ticklabels('')
for i in range(len(ax)-2):
ax[i].set_ylim(ztop,zbot)
ax[i].invert_yaxis()
ax[i].grid()
ax[i].locator_params(axis='x', nbins=3)
ax[0].set_xlabel("GR")
ax[0].set_xlim(logs.GR.min(),logs.GR.max())
ax[1].set_xlabel("ILD_log10")
ax[1].set_xlim(logs.ILD_log10.min(),logs.ILD_log10.max())
ax[2].set_xlabel("DeltaPHI")
ax[2].set_xlim(logs.DeltaPHI.min(),logs.DeltaPHI.max())
ax[3].set_xlabel("PHIND")
ax[3].set_xlim(logs.PHIND.min(),logs.PHIND.max())
ax[4].set_xlabel("PE")
ax[4].set_xlim(logs.PE.min(),logs.PE.max())
ax[5].set_xlabel('Facies')
ax[6].set_xlabel(compadre)
ax[1].set_yticklabels([]); ax[2].set_yticklabels([]); ax[3].set_yticklabels([])
ax[4].set_yticklabels([]); ax[5].set_yticklabels([])
ax[5].set_xticklabels([])
ax[6].set_xticklabels([])
f.suptitle('Well: %s'%logs.iloc[0]['WellName'], fontsize=14,y=0.94)
| [
[
[
7,
18
],
[
1071,
1073
],
[
1081,
1083
],
[
3263,
3265
],
[
3273,
3275
],
[
3337,
3339
],
[
3347,
3349
]
],
[
[
26,
50
],
[
1147,
1150
],
[
1690,
1693
],
[
3413,
3416
],
[
4082,
4085
]
],
[
[
58,
85
],
[
917,
923
],
[
3106,
3112
]
],
[
[
122,
141
],
[
1593,
1612
],
[
3985,
4004
]
],
[
[
282,
295
],
[
627,
640
]
],
[
[
375,
388
],
[
673,
686
]
],
[
[
577,
593
],
[
647,
663
]
],
[
[
603,
606
],
[
687,
690
]
],
[
[
608,
613
],
[
664,
669
]
],
[
[
707,
719
]
],
[
[
776,
796
]
],
[
[
2827,
2846
]
]
] |
import wx
import numpy as np
from imagepy.core.engine import Tool, Filter
import scipy.ndimage as nimg
class ScaleTool(Tool):
def __init__(self, plg):
self.plg = plg
self.para = plg.para
self.moving = False
def snap(self, x, y, lim):
plg = self.plg
if abs(x-plg.lt)<lim and abs(y-(plg.tp+plg.bm)/2)<lim:return 'l'
if abs(x-plg.rt)<lim and abs(y-(plg.tp+plg.bm)/2)<lim:return 'r'
if abs(x-(plg.lt+plg.rt)/2)<lim and abs(y-plg.tp)<lim:return 't'
if abs(x-(plg.lt+plg.rt)/2)<lim and abs(y-plg.bm)<lim:return 'b'
if abs(x-plg.lt)<lim and abs(y-plg.tp)<lim:return 'lt'
if abs(x-plg.rt)<lim and abs(y-plg.bm)<lim:return 'rb'
if abs(x-plg.rt)<lim and abs(y-plg.tp)<lim:return 'rt'
if abs(x-plg.lt)<lim and abs(y-plg.bm)<lim:return 'lb'
if (x-plg.lt)*(x-plg.rt)<0 and (y-plg.tp)*(y-plg.bm)<0:
self.ox, self.oy = x, y
return True
return False
def mouse_down(self, ips, x, y, btn, **key):
lim = 5.0/key['canvas'].get_scale()
self.moving = self.snap(x, y, lim)
print(self.moving)
def mouse_up(self, ips, x, y, btn, **key):
if self.moving : self.plg.preview(ips, self.para)
def mouse_move(self, ips, x, y, btn, **key):
lim = 5.0/key['canvas'].get_scale()
if btn==None:
self.cursor = wx.CURSOR_CROSS
if isinstance(self.snap(x, y, lim), str):
self.cursor = wx.CURSOR_HAND
elif self.moving==True:
self.plg.lt+=x-self.ox
self.plg.rt+=x-self.ox
self.plg.bm+=y-self.oy
self.plg.tp+=y-self.oy
self.ox, self.oy = x, y
self.plg.count()
self.plg.dialog.reset()
ips.update = True
elif self.moving != False:
print("scale_tol.ScaleTool.mouse_move")
if 'l' in self.moving:self.plg.lt = x
if 'r' in self.moving:self.plg.rt = x
if 't' in self.moving:self.plg.tp = y
if 'b' in self.moving:self.plg.bm = y
self.plg.count()
self.plg.dialog.reset()
ips.update = True
class Plugin(Filter):
modal = False
title = 'Scale'
note = ['all', 'auto_msk', 'auto_snap', 'preview']
para = {'kx': 1, 'ky':1, 'ox':0, 'oy':0, 'img':True, 'msk':False}
view = [(float, (-100,100), 3, 'KX', 'kx', ''),
(float, (-100,100), 3, 'KY', 'ky', ''),
(int, (-10000,10000), 0, 'OffX', 'ox', 'pix'),
(int, (-10000,10000), 0, 'OffY', 'oy', 'pix'),
(bool, 'scale image', 'img'),
(bool, 'scale mask', 'msk')]
def draw(self, dc, f, **key):
body = [(self.lt,self.bm),(self.rt,self.bm),
(self.rt,self.tp),(self.lt,self.tp),(self.lt,self.bm)]
dc.SetPen(wx.Pen((0,255,0), width=1, style=wx.SOLID))
dc.DrawLines([f(*i) for i in body])
for i in body:dc.DrawCircle(f(*i),2)
dc.DrawCircle(f(self.lt, (self.tp+self.bm)/2),2)
dc.DrawCircle(f(self.rt, (self.tp+self.bm)/2),2)
dc.DrawCircle(f((self.lt+self.rt)/2, self.tp),2)
dc.DrawCircle(f((self.lt+self.rt)/2, self.bm),2)
def load(self, ips):
self.bufroi = ips.roi
self.lt, self.tp, self.rt, self.bm = 0, 0, ips.size[1], ips.size[0]
if ips.roi!=None:
box = ips.roi.get_box()
if box[0]!=box[2] and box[1]!=box[3]:
self.lt, self.tp, self.rt, self.bm = box
self.orio = ((self.lt+self.rt)/2,(self.tp+self.bm)/2)
self.oriw, self.orih = self.rt - self.lt, self.tp - self.bm
self.para['ox'] = (self.lt+self.rt)/2
self.para['oy'] = (self.tp+self.bm)/2
self.para['kx'] = self.para['ky'] = 1
ips.mark = self
ips.update = True
ips.tool = ScaleTool(self)
return True
def count(self, dir=True):
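        # dir=True: recompute the offset/scale parameters from the current box; dir=False: recompute the box from the parameters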
if dir:
self.para['ox'] = int((self.lt+self.rt)/2)
self.para['oy'] = int((self.tp+self.bm)/2)
self.para['kx'] = (self.rt-self.lt)*1.0/self.oriw
self.para['ky'] = (self.tp-self.bm)*1.0/self.orih
else:
self.lt = self.para['ox']-self.oriw*self.para['kx']/2
self.rt = self.para['ox']+self.oriw*self.para['kx']/2
self.bm = self.para['oy']-self.orih*self.para['ky']/2
self.tp = self.para['oy']+self.orih*self.para['ky']/2
def ok(self, ips, para=None):
Filter.ok(self, ips, para)
ips.mark = None
ips.tool = None
def cancel(self, ips):
Filter.cancel(self, ips)
ips.roi = self.bufroi
ips.mark = None
ips.tool = None
ips.update = 'pix'
def run(self, ips, img, buf, para = None):
if para == None: para = self.para
self.count(False)
trans = np.array([[1/self.para['ky'],0],[0,1/self.para['kx']]])
o = np.array([self.para['oy'], self.para['ox']])
offset = self.orio[::-1]-trans.dot(o)
if self.para['img']:
nimg.affine_transform(img, trans, output=buf, offset=offset)
trans = np.array([[self.para['kx'],0],[0, self.para['ky']]])
offset = o[::-1]-trans.dot(self.orio)
if self.para['msk'] and self.bufroi!=None:ips.roi = self.bufroi.affine(trans, offset)
if self.para['img'] and not ips.get_msk('out') is None:
buf[ips.get_msk('out')] = img[ips.get_msk('out')]
ips.update = True
| [
[
[
7,
9
],
[
1428,
1430
],
[
1528,
1530
],
[
2905,
2907
],
[
2938,
2940
]
],
[
[
17,
28
],
[
4974,
4976
],
[
5042,
5044
],
[
5251,
5253
]
],
[
[
61,
65
],
[
120,
124
]
],
[
[
67,
73
],
[
2242,
2248
],
[
4585,
4591
],
[
4704,
4710
]
],
[
[
81,
102
],
[
5174,
5178
]
],
[
[
110,
119
],
[
3938,
3947
]
],
[
[
2235,
2241
]
]
] |
class std_logic():
"""
    class to represent a digital bit, supporting the same 9 values as IEEE 1164.
====== ===============
    Value  Interpretation
------ ---------------
    U      Uninitialized
X Unknown
0 Strong 0
1 Strong 1
Z High Impedance
W Weak unknown logic
L Weak logic 0
H Weak logic 1
- Don't care
====== ===============
Refer to https://en.wikipedia.org/wiki/IEEE_1164 for more details
"""
def __init__(self,initialvalue='U'):
"""
:param initialvalue: value to be loaded into the bit
:type initialvalue: int, bool, str
"""
self._value = 'U'
self.set(value=initialvalue)
def __str__(self):
return self._value
def __repr__(self):
base_repr = super().__repr__()
return base_repr[:-2] + ':%s>'%self._value
def __eq__(self, other):
if issubclass(other.__class__,std_logic):
return self._value == other._value
else:
raise NotImplementedError
def __and__(self,other):
return_value = NotImplemented
if issubclass(other.__class__,std_logic):
"""
truth table from std_logic_1164-body.vhdl
----------------------------------------------------
| U X 0 1 Z W L H - | |
----------------------------------------------------
( 'U', 'U', '0', 'U', 'U', 'U', '0', 'U', 'U' ), -- | U |
( 'U', 'X', '0', 'X', 'X', 'X', '0', 'X', 'X' ), -- | X |
( '0', '0', '0', '0', '0', '0', '0', '0', '0' ), -- | 0 |
( 'U', 'X', '0', '1', 'X', 'X', '0', '1', 'X' ), -- | 1 |
( 'U', 'X', '0', 'X', 'X', 'X', '0', 'X', 'X' ), -- | Z |
( 'U', 'X', '0', 'X', 'X', 'X', '0', 'X', 'X' ), -- | W |
( '0', '0', '0', '0', '0', '0', '0', '0', '0' ), -- | L |
( 'U', 'X', '0', '1', 'X', 'X', '0', '1', 'X' ), -- | H |
( 'U', 'X', '0', 'X', 'X', 'X', '0', 'X', 'X' ) -- | - |
"""
if self == std_logic('U'):
if other == std_logic('0') or other == std_logic('L'):
return_value = std_logic(0)
else:
return_value = std_logic('U')
elif self == std_logic('X') or self == std_logic('-') or self == std_logic('W') or self == std_logic('Z'):
if other == std_logic('U'):
return_value = std_logic('U')
elif other == std_logic('0') or other == std_logic('L'):
return_value = std_logic(0)
else:
return_value = std_logic('X')
elif self == std_logic('0') or self == std_logic('L'):
return_value = std_logic(0)
elif self == std_logic('1') or self == std_logic('H'):
if other == std_logic('U'):
return_value = std_logic('U')
elif other == std_logic('0') or other == std_logic('L'):
return_value = std_logic(0)
elif other == std_logic('1') or other == std_logic('H'):
return_value = std_logic(1)
else:
return_value = std_logic('X')
else:
raise TypeError('can not perform operation on classes')
return return_value
def __xor__(self, other):
"""
        performs a bitwise xor operation
:param other:
:return: self ^ other
"""
return_value = NotImplemented
if issubclass(other.__class__,std_logic):
"""
truth table from std_logic_1164-body.vhdl
----------------------------------------------------
| U X 0 1 Z W L H - | |
----------------------------------------------------
('U', 'U', 'U', 'U', 'U', 'U', 'U', 'U', 'U'), -- | U |
('U', 'X', 'X', 'X', 'X', 'X', 'X', 'X', 'X'), -- | X |
('U', 'X', '0', '1', 'X', 'X', '0', '1', 'X'), -- | 0 |
('U', 'X', '1', '0', 'X', 'X', '1', '0', 'X'), -- | 1 |
('U', 'X', 'X', 'X', 'X', 'X', 'X', 'X', 'X'), -- | Z |
('U', 'X', 'X', 'X', 'X', 'X', 'X', 'X', 'X'), -- | W |
('U', 'X', '0', '1', 'X', 'X', '0', '1', 'X'), -- | L |
('U', 'X', '1', '0', 'X', 'X', '1', '0', 'X'), -- | H |
('U', 'X', 'X', 'X', 'X', 'X', 'X', 'X', 'X') -- | - |
);
"""
if self == std_logic('U'):
return_value = std_logic('U')
elif self == std_logic('X') or self == std_logic('-') or self == std_logic('W') or self == std_logic('Z'):
if other == std_logic('U'):
return_value = std_logic('U')
else:
return_value = std_logic('X')
elif self == std_logic('1') or self == std_logic('H'):
if other == std_logic('U'):
return_value = std_logic('U')
elif other == std_logic('0') or other == std_logic('L'):
return_value = std_logic(1)
elif other == std_logic('1') or other == std_logic('H'):
return_value = std_logic(0)
else:
return_value = std_logic('X')
elif self == std_logic('0') or self == std_logic('L'):
if other == std_logic('U'):
return_value = std_logic('U')
elif other == std_logic('0') or other == std_logic('L'):
return_value = std_logic(0)
elif other == std_logic('1') or other == std_logic('H'):
return_value = std_logic(1)
else:
return_value = std_logic('X')
else:
raise TypeError('can not perform operation on classes')
return return_value
def __or__(self,other):
return_value = NotImplemented
if issubclass(other.__class__,std_logic):
"""
truth table from std_logic_1164-body.vhdl
----------------------------------------------------
| U X 0 1 Z W L H - | |
----------------------------------------------------
('U', 'U', 'U', '1', 'U', 'U', 'U', '1', 'U'), -- | U |
('U', 'X', 'X', '1', 'X', 'X', 'X', '1', 'X'), -- | X |
('U', 'X', '0', '1', 'X', 'X', '0', '1', 'X'), -- | 0 |
('1', '1', '1', '1', '1', '1', '1', '1', '1'), -- | 1 |
('U', 'X', 'X', '1', 'X', 'X', 'X', '1', 'X'), -- | Z |
('U', 'X', 'X', '1', 'X', 'X', 'X', '1', 'X'), -- | W |
('U', 'X', '0', '1', 'X', 'X', '0', '1', 'X'), -- | L |
('1', '1', '1', '1', '1', '1', '1', '1', '1'), -- | H |
('U', 'X', 'X', '1', 'X', 'X', 'X', '1', 'X') -- | - |
)
"""
if self == std_logic('U'):
if other == std_logic('1') or other == std_logic('H'):
return_value = std_logic(1)
else:
return_value = std_logic('U')
elif self == std_logic('X') or self == std_logic('-') or self == std_logic('W') or self == std_logic('Z'):
if other == std_logic('U'):
return_value = std_logic('U')
elif other == std_logic('1') or other == std_logic('H'):
return_value = std_logic(1)
else:
return_value = std_logic('X')
elif self == std_logic('1') or self == std_logic('H'):
return_value = std_logic(1)
elif self == std_logic('0') or self == std_logic('L'):
if other == std_logic('U'):
return_value = std_logic('U')
elif other == std_logic('0') or other == std_logic('L'):
return_value = std_logic(0)
elif other == std_logic('1') or other == std_logic('H'):
return_value = std_logic(1)
else:
return_value = std_logic('X')
else:
raise TypeError('can not perform operation on classes')
return return_value
def __invert__(self):
"""
truth table from std_logic_1164-body.vhdl
-------------------------------------------------
| U X 0 1 Z W L H - |
-------------------------------------------------
('U', 'X', '1', '0', 'X', 'X', '1', '0', 'X')
"""
if self == std_logic('U'):
return_value = std_logic('U')
elif self == std_logic('X') or self == std_logic('-') or self == std_logic('W') or self == std_logic('Z'):
return_value = std_logic('X')
elif self == std_logic('0') or self == std_logic('L'):
return_value = std_logic(1)
elif self == std_logic('1') or self == std_logic('H'):
return_value = std_logic(0)
return return_value
def set(self,value):
"""
in place value set
:param value: value to be loaded into the bit
:type value: int, bool, str
"""
if isinstance(value,str):
if len(value) != 1:
raise ValueError('length is not 1')
if ((value == 'U') or
(value == 'X') or
(value == '0') or
(value == '1') or
(value == 'Z') or
(value == 'W') or
(value == 'L') or
(value == 'H') or
(value == '-')):
self._value = value
else:
raise ValueError('Unsupported value, only U,X,0,1,Z,W,L,H or - is permitted')
elif isinstance(value,bool):
if value is False:
self._value = '0'
elif value is True:
self._value = '1'
else:
raise ValueError('Illegal boolean value')
elif isinstance(value,int):
if (value == 0) or (value == 1):
self._value = str(value)
assert (self._value == '1') or (self._value == '0')
else:
raise ValueError('Unsupported integer value, only 0 or 1 is permitted')
else:
raise ValueError('Unsupported type')
| [
[
[
8,
17
],
[
1059,
1068
],
[
1278,
1287
],
[
2240,
2249
],
[
2284,
2293
],
[
2311,
2320
],
[
2362,
2371
],
[
2432,
2441
],
[
2472,
2481
],
[
2498,
2507
],
[
2524,
2533
],
[
2550,
2559
],
[
2594,
2603
],
[
2645,
2654
],
[
2690,
2699
],
[
2717,
2726
],
[
2768,
2777
],
[
2838,
2847
],
[
2878,
2887
],
[
2904,
2913
],
[
2951,
2960
],
[
2989,
2998
],
[
3015,
3024
],
[
3059,
3068
],
[
3110,
3119
],
[
3155,
3164
],
[
3182,
3191
],
[
3233,
3242
],
[
3276,
3285
],
[
3303,
3312
],
[
3354,
3363
],
[
3424,
3433
],
[
3777,
3786
],
[
4748,
4757
],
[
4795,
4804
],
[
4835,
4844
],
[
4861,
4870
],
[
4887,
4896
],
[
4913,
4922
],
[
4957,
4966
],
[
5008,
5017
],
[
5080,
5089
],
[
5120,
5129
],
[
5146,
5155
],
[
5190,
5199
],
[
5241,
5250
],
[
5286,
5295
],
[
5313,
5322
],
[
5364,
5373
],
[
5407,
5416
],
[
5434,
5443
],
[
5485,
5494
],
[
5555,
5564
],
[
5595,
5604
],
[
5621,
5630
],
[
5665,
5674
],
[
5716,
5725
],
[
5761,
5770
],
[
5788,
5797
],
[
5839,
5848
],
[
5882,
5891
],
[
5909,
5918
],
[
5960,
5969
],
[
6030,
6039
],
[
6264,
6273
],
[
7237,
7246
],
[
7281,
7290
],
[
7308,
7317
],
[
7359,
7368
],
[
7429,
7438
],
[
7469,
7478
],
[
7495,
7504
],
[
7521,
7530
],
[
7547,
7556
],
[
7591,
7600
],
[
7642,
7651
],
[
7687,
7696
],
[
7714,
7723
],
[
7765,
7774
],
[
7835,
7844
],
[
7875,
7884
],
[
7901,
7910
],
[
7948,
7957
],
[
7986,
7995
],
[
8012,
8021
],
[
8056,
8065
],
[
8107,
8116
],
[
8152,
8161
],
[
8179,
8188
],
[
8230,
8239
],
[
8273,
8282
],
[
8300,
8309
],
[
8351,
8360
],
[
8421,
8430
],
[
8901,
8910
],
[
8944,
8953
],
[
8980,
8989
],
[
9006,
9015
],
[
9032,
9041
],
[
9058,
9067
],
[
9101,
9110
],
[
9137,
9146
],
[
9163,
9172
],
[
9206,
9215
],
[
9240,
9249
],
[
9266,
9275
],
[
9309,
9318
]
]
] |
#!/usr/bin/env python
"""
Predefined bluesky scan plans
"""
import numpy as np
import bluesky.plans as bp
import bluesky.preprocessors as bpp
import bluesky.plan_stubs as bps
from .utility import load_config
#@bpp.run_decorator()
def collect_white_field(experiment, cfg_tomo, atfront=True):
"""
Collect white/flat field images by moving the sample out of the FOV
"""
# unpack devices
det = experiment.det
tomostage = experiment.tomostage
# move sample out of the way
_x = cfg_tomo['fronte_white_ksamX'] if atfront else cfg_tomo['back_white_ksamX']
_z = cfg_tomo['fronte_white_ksamZ'] if atfront else cfg_tomo['back_white_ksamZ']
yield from bps.mv(tomostage.ksamX, _x)
yield from bps.mv(tomostage.ksamZ, _z)
# setup detector
yield from bps.mv(det.hdf1.nd_array_port, 'PROC1')
yield from bps.mv(det.tiff1.nd_array_port, 'PROC1')
yield from bps.mv(det.proc1.enable, 1)
yield from bps.mv(det.proc1.reset_filter, 1)
yield from bps.mv(det.proc1.num_filter, cfg_tomo['n_frames'])
yield from bps.mv(det.cam.trigger_mode, "Internal")
yield from bps.mv(det.cam.image_mode, "Multiple")
yield from bps.mv(det.cam.num_images, cfg_tomo['n_frames']*cfg_tomo['n_white'])
yield from bps.trigger_and_read([det])
# move sample back to FOV
# NOTE:
    # not sure if this will work or not...
yield from bps.mv(tomostage.ksamX, cfg_tomo['initial_ksamX'])
yield from bps.mv(tomostage.ksamZ, cfg_tomo['initial_ksamZ'])
#@bpp.run_decorator()
def collect_dark_field(experiment, cfg_tomo):
"""
    Collect dark field images by closing the shutter
"""
det = experiment.det
yield from bps.mv(det.hdf1.nd_array_port, 'PROC1')
yield from bps.mv(det.tiff1.nd_array_port, 'PROC1')
yield from bps.mv(det.proc1.enable, 1)
yield from bps.mv(det.proc1.reset_filter, 1)
yield from bps.mv(det.proc1.num_filter, cfg_tomo['n_frames'])
yield from bps.mv(det.cam.trigger_mode, "Internal")
yield from bps.mv(det.cam.image_mode, "Multiple")
yield from bps.mv(det.cam.num_images, cfg_tomo['n_frames']*cfg_tomo['n_dark'])
yield from bps.trigger_and_read([det])
#@bpp.run_decorator()
def step_scan(experiment, cfg_tomo):
"""
    Collect projections with step motion
"""
# unpack devices
det = experiment.det
tomostage = experiment.tomostage
yield from bps.mv(det.hdf1.nd_array_port, 'PROC1')
yield from bps.mv(det.tiff1.nd_array_port, 'PROC1')
yield from bps.mv(det.proc1.enable, 1)
yield from bps.mv(det.proc1.reset_filter, 1)
yield from bps.mv(det.proc1.num_filter, cfg_tomo['n_frames'])
angs = np.arange(
cfg_tomo['omega_start'],
cfg_tomo['omega_end']+cfg_tomo['omega_step']/2,
cfg_tomo['omega_step'],
)
for ang in angs:
yield from bps.checkpoint()
yield from bps.mv(tomostage.preci, ang)
yield from bps.trigger_and_read([det])
#@bpp.run_decorator()
def fly_scan(experiment, cfg_tomo):
"""
Collect projections with fly motion
"""
det = experiment.det
psofly = experiment.psofly
yield from bps.mv(det.hdf1.nd_array_port, 'PG1')
yield from bps.mv(det.tiff1.nd_array_port, 'PG1')
# we are assuming that the global psofly is available
yield from bps.mv(
psofly.start, cfg_tomo['omega_start'],
psofly.end, cfg_tomo['omega_end'],
psofly.scan_delta, abs(cfg_tomo['omega_step']),
psofly.slew_speed, cfg_tomo['slew_speed'],
)
# taxi
yield from bps.mv(psofly.taxi, "Taxi")
yield from bps.mv(
det.cam.num_images, cfg_tomo['n_projections'],
det.cam.trigger_mode, "Overlapped",
)
# start the fly scan
yield from bps.trigger(det, group='fly')
yield from bps.abs_set(psofly.fly, "Fly", group='fly')
yield from bps.wait(group='fly')
def tomo_scan(experiment, cfg):
"""
Tomography scan plan based on given configuration
"""
# unpack devices
det = experiment.det
tomostage = experiment.tomostage
shutter = experiment.shutter
shutter_suspender = experiment.suspend_shutter
cfg = load_config(cfg) if type(cfg) != dict else cfg
# update the cached motor position in the dict in case exp goes wrong
_cahed_position = experiment.cache_motor_position()
# step 0: preparation
acquire_time = cfg['tomo']['acquire_time']
n_white = cfg['tomo']['n_white']
n_dark = cfg['tomo']['n_dark']
angs = np.arange(
cfg['tomo']['omega_start'],
cfg['tomo']['omega_end']+cfg['tomo']['omega_step']/2,
cfg['tomo']['omega_step'],
)
n_projections = len(angs)
cfg['tomo']['n_projections'] = n_projections
total_images = n_white + n_projections + n_white + n_dark
fp = cfg['output']['filepath']
fn = cfg['output']['fileprefix']
# calculate slew speed for fly scan
# https://github.com/decarlof/tomo2bm/blob/master/flir/libs/aps2bm_lib.py
# TODO: considering blue pixels, use 2BM code as ref
if cfg['tomo']['type'].lower() == 'fly':
scan_time = (acquire_time+cfg['tomo']['readout_time'])*n_projections
slew_speed = (angs.max() - angs.min())/scan_time
cfg['tomo']['slew_speed'] = slew_speed
# need to make sure that the sample out position is the same for both front and back
x0, z0 = tomostage.ksamX.position, tomostage.ksamZ.position
dfx, dfz = cfg['tomo']['sample_out_position']['samX'], cfg['tomo']['sample_out_position']['samZ']
rotang = np.radians(cfg['tomo']['omega_end']-cfg['tomo']['omega_start'])
rotm = np.array([[ np.cos(rotang), np.sin(rotang)],
[-np.sin(rotang), np.cos(rotang)]])
dbxz = np.dot(rotm, np.array([dfx, dfz]))
dbx = dbxz[0] if abs(dbxz[0]) > 1e-8 else 0.0
dbz = dbxz[1] if abs(dbxz[1]) > 1e-8 else 0.0
# now put the value to dict
cfg['tomo']['initial_ksamX'] = x0
cfg['tomo']['initial_ksamZ'] = z0
cfg['tomo']['fronte_white_ksamX'] = x0 + dfx
cfg['tomo']['fronte_white_ksamZ'] = z0 + dfz
cfg['tomo']['back_white_ksamX'] = x0 + dbx
cfg['tomo']['back_white_ksamZ'] = z0 + dbz
@bpp.run_decorator()
@bpp.stage_decorator([det])
def scan_closure():
# open shutter for beam
yield from bps.mv(shutter, 'open')
yield from bps.install_suspender(shutter_suspender)
# config output
for me in [det.tiff1, det.hdf1]:
yield from bps.mv(me.file_path, fp)
yield from bps.mv(me.file_name, fn)
yield from bps.mv(me.file_write_mode, 2)
yield from bps.mv(me.num_capture, total_images)
yield from bps.mv(me.file_template, ".".join([r"%s%s_%06d",cfg['output']['type'].lower()]))
if cfg['output']['type'] in ['tif', 'tiff']:
yield from bps.mv(det.tiff1.enable, 1)
yield from bps.mv(det.tiff1.capture, 1)
yield from bps.mv(det.hdf1.enable, 0)
elif cfg['output']['type'] in ['hdf', 'hdf1', 'hdf5']:
yield from bps.mv(det.tiff1.enable, 0)
yield from bps.mv(det.hdf1.enable, 1)
yield from bps.mv(det.hdf1.capture, 1)
else:
raise ValueError(f"Unsupported output type {cfg['output']['type']}")
# collect front white field
yield from bps.mv(det.cam.frame_type, 0) # for HDF5 dxchange data structure
yield from collect_white_field(experiment, cfg['tomo'], atfront=True)
# collect projections
yield from bps.mv(det.cam.frame_type, 1) # for HDF5 dxchange data structure
if cfg['tomo']['type'].lower() == 'step':
yield from step_scan(experiment, cfg['tomo'])
elif cfg['tomo']['type'].lower() == 'fly':
yield from fly_scan(experiment, cfg['tomo'])
else:
raise ValueError(f"Unsupported scan type: {cfg['tomo']['type']}")
# collect back white field
yield from bps.mv(det.cam.frame_type, 2) # for HDF5 dxchange data structure
yield from collect_white_field(experiment, cfg['tomo'], atfront=False)
# collect back dark field
yield from bps.mv(det.cam.frame_type, 3) # for HDF5 dxchange data structure
yield from bps.remove_suspender(shutter_suspender)
yield from bps.mv(shutter, "close")
yield from collect_dark_field(experiment, cfg['tomo'])
return (yield from scan_closure())
| [
[
[
69,
96
],
[
2691,
2693
],
[
4569,
4571
],
[
5613,
5615
],
[
5688,
5690
],
[
5700,
5702
],
[
5716,
5718
],
[
5756,
5758
],
[
5772,
5774
],
[
5801,
5803
],
[
5814,
5816
]
],
[
[
104,
131
]
],
[
[
139,
167
],
[
6246,
6249
],
[
6271,
6274
]
],
[
[
175,
203
],
[
723,
726
],
[
766,
769
],
[
831,
834
],
[
886,
889
],
[
943,
946
],
[
986,
989
],
[
1035,
1038
],
[
1101,
1104
],
[
1157,
1160
],
[
1211,
1214
],
[
1295,
1298
],
[
1424,
1427
],
[
1490,
1493
],
[
1719,
1722
],
[
1774,
1777
],
[
1831,
1834
],
[
1874,
1877
],
[
1923,
1926
],
[
1989,
1992
],
[
2045,
2048
],
[
2099,
2102
],
[
2182,
2185
],
[
2424,
2427
],
[
2479,
2482
],
[
2536,
2539
],
[
2579,
2582
],
[
2628,
2631
],
[
2870,
2873
],
[
2906,
2909
],
[
2954,
2957
],
[
3174,
3177
],
[
3227,
3230
],
[
3340,
3343
],
[
3609,
3612
],
[
3652,
3655
],
[
3805,
3808
],
[
3850,
3853
],
[
3909,
3912
],
[
6373,
6376
],
[
6416,
6419
],
[
6554,
6557
],
[
6602,
6605
],
[
6650,
6653
],
[
6703,
6706
],
[
6763,
6766
],
[
6925,
6928
],
[
6976,
6979
],
[
7028,
7031
],
[
7141,
7144
],
[
7192,
7195
],
[
7242,
7245
],
[
7421,
7424
],
[
7615,
7618
],
[
8044,
8047
],
[
8243,
8246
],
[
8328,
8331
],
[
8387,
8390
]
],
[
[
237,
248
],
[
4217,
4228
]
],
[
[
276,
295
],
[
7506,
7525
],
[
8129,
8148
]
],
[
[
1569,
1587
],
[
8431,
8449
]
],
[
[
2238,
2247
],
[
7754,
7763
]
],
[
[
3010,
3018
],
[
7863,
7871
]
],
[
[
3937,
3946
]
]
] |
from pymongo import *
from flask import *
from flask_restful import *
import datetime
mongodb_url = "mongodb://Ranuga:ranuga2008@cluster0-shard-00-00.6n3dg.mongodb.net:27017,cluster0-shard-00-01.6n3dg.mongodb.net:27017,cluster0-shard-00-02.6n3dg.mongodb.net:27017/myFirstDatabase?ssl=true&replicaSet=atlas-uo9rgq-shard-0&authSource=admin&retryWrites=true&w=majority"
app = Flask(__name__)
app.debug = True
app.secret_key = "development"
cluster = MongoClient(mongodb_url)
from server.routes import *
| [
[
[
20,
21
]
],
[
[
40,
41
]
],
[
[
68,
69
],
[
375,
380
],
[
449,
460
]
],
[
[
77,
85
]
],
[
[
88,
99
],
[
461,
472
]
],
[
[
369,
372
],
[
391,
394
],
[
408,
411
]
],
[
[
439,
446
]
],
[
[
500,
501
]
]
] |
"""
Classes for GP models with Stan that perform transfer optimization.
"""
from argparse import Namespace
import numpy as np
import copy
from .gp_stan import StanGp
from .regression.transfer_regression import TransferRegression
from ..util.misc_util import dict_to_namespace
class StanTransferGp(StanGp):
"""
GP model with transferred prior mean based on a regression model.
"""
def __init__(self, params=None, data=None, verbose=None):
self.set_params(params)
self.set_verbose(verbose)
self.set_model(data)
def set_params(self, params):
"""Set self.params, the parameters for this model."""
super().set_params(params)
params = dict_to_namespace(params)
assert hasattr(params, 'transfer_config')
self.params.transfer_config = params.transfer_config
def set_model(self, data):
"""Set GP Stan model and regression model."""
self.model = self.get_model()
self.regressor = self.get_regressor(data)
#self.regressor = self.get_proxy_regressor(data) # TODO
def get_regressor(self, data):
"""Return transfer (prior mean) regressor."""
# Define regressor
regressor = TransferRegression(self.params.transfer_config)
if len(data.x) < 1:
regressor = None
else:
mean_errors = []
# TODO: remove extra files such as .DS_STORE (or ignore files that break)
for i, reg in enumerate(regressor.model_fnames):
try:
val_acc = regressor.evaluate_model(reg, data.x)
error = np.mean((data.y - val_acc) ** 2)
mean_errors.append((error, i))
except:
print(f'Transfer model file in tarball did not load: {reg}')
mean_errors.sort()
if mean_errors[0][0] > self.params.transfer_config.get('metric_threshold', 0.6):
regressor.set_best_model(-1)
else:
regressor.set_best_model(mean_errors[0][1])
return regressor
def get_proxy_regressor(self, data):
if not data:
regressor = None
else:
def regressor(x): return np.linalg.norm(x)
return regressor
def transform_data_y(self):
"""Transform data.y using PriorMeanDataTransformer."""
self.dt = PriorMeanDataTransformer(self.data, self.regressor, False)
y_trans = self.dt.transform_y_data()
self.data = Namespace(x=self.data.x, y=y_trans)
def gen_list(self, x_list, z, s, nsamp):
"""
Draw nsamp samples from generative process, given list of inputs
x_list, posterior sample z, and seed s.
Parameters
----------
x_list : list
List of numpy ndarrays each with shape=(self.params.ndimx,)
z : Namespace
Namespace of GP hyperparameters.
s : int
The seed, a positive integer.
nsamp : int
The number of samples to draw from generative process.
Returns
-------
list
A list with len=len(x_list) of numpy ndarrays, each with
shape=(nsamp,).
"""
x_list = self.transform_xin_list(x_list)
pred_list = self.sample_gp_pred(nsamp, x_list)
pred_list = [
self.dt.inv_transform_y_data(pr, x) for pr, x in zip(pred_list, x_list)
]
return pred_list
def postgen_list(self, x_list, s, nsamp):
"""
Draw nsamp samples from posterior predictive distribution, given list
of inputs x_list and seed s.
Parameters
----------
x_list : list
List of numpy ndarrays each with shape=(self.params.ndimx,).
s : int
The seed, a positive integer.
nsamp : int
The number of samples to draw from the posterior predictive
distribution.
Returns
-------
list
A list with len=len(x_list) of numpy ndarrays, each with
shape=(nsamp,).
"""
x_list = self.transform_xin_list(x_list)
pred_list = self.sample_gp_post_pred(
nsamp, x_list, full_cov=True, nloop=np.min([50, nsamp])
)
pred_list = [
self.dt.inv_transform_y_data(pr, x) for pr, x in zip(pred_list, x_list)
]
return pred_list
def __str__(self):
return f'StanTransferGp with params={self.params}'
class PriorMeanDataTransformer:
"""
A class to transform (and inverse transform) data, based on a prior mean regression.
"""
def __init__(self, data, prior_mean_f, verbose=True):
"""
Parameters
----------
data : Namespace
Namespace containing data.
prior_mean_f : function
Prior mean function.
verbose : bool
If True, print description string.
"""
self._set_data(data)
self._set_prior_mean_f(prior_mean_f)
self._set_verbose(verbose)
def _set_data(self, data):
"""Set self.data"""
self.data = data
def _set_prior_mean_f(self, prior_mean_f):
"""Set self.prior_mean_f."""
if prior_mean_f is None:
# Default prior mean function is constant 0 function
def prior_mean_f(x): return 0.
self.prior_mean_f = prior_mean_f
def _set_verbose(self, verbose):
"""Set verbose options."""
self.verbose = verbose
if self.verbose:
self._print_str()
def transform_y_data(self, y_data=None, x_data=None):
"""Transform and return self.data.y"""
# Transform self.data.y into new list
y_trans = [y - self.prior_mean_f(x) for x, y in zip(self.data.x, self.data.y)]
return y_trans
def inv_transform_y_data(self, y_arr, x_single_arr):
"""Return inverse transform of y_arr."""
# Compute prior mean val for the single input
prior_mean_val = self.prior_mean_f(x_single_arr)
# Inverse transform y_arr into list
y_inv_trans_list = [y + prior_mean_val for y in list(y_arr)]
# Transform back to array and return
y_inv_trans = np.array(y_inv_trans_list).reshape(-1)
return y_inv_trans
def _print_str(self):
"""Print a description string."""
print('*PriorMeanDataTransformer')
| [
[
[
98,
107
],
[
2517,
2526
]
],
[
[
115,
126
],
[
1630,
1632
],
[
4259,
4261
],
[
6257,
6259
],
[
2235,
2237
]
],
[
[
134,
138
]
],
[
[
161,
167
],
[
301,
307
]
],
[
[
212,
230
],
[
1216,
1234
]
],
[
[
260,
277
],
[
702,
719
]
],
[
[
286,
300
]
],
[
[
4521,
4545
],
[
2393,
2417
]
]
] |
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union
from ... import _utilities, _tables
from . import outputs
__all__ = [
'GetManagementLockAtResourceLevelResult',
'AwaitableGetManagementLockAtResourceLevelResult',
'get_management_lock_at_resource_level',
]
warnings.warn("""The 'latest' version is deprecated. Please migrate to the function in the top-level module: 'azure-native:authorization:getManagementLockAtResourceLevel'.""", DeprecationWarning)
@pulumi.output_type
class GetManagementLockAtResourceLevelResult:
"""
The lock information.
"""
def __init__(__self__, id=None, level=None, name=None, notes=None, owners=None, type=None):
if id and not isinstance(id, str):
raise TypeError("Expected argument 'id' to be a str")
pulumi.set(__self__, "id", id)
if level and not isinstance(level, str):
raise TypeError("Expected argument 'level' to be a str")
pulumi.set(__self__, "level", level)
if name and not isinstance(name, str):
raise TypeError("Expected argument 'name' to be a str")
pulumi.set(__self__, "name", name)
if notes and not isinstance(notes, str):
raise TypeError("Expected argument 'notes' to be a str")
pulumi.set(__self__, "notes", notes)
if owners and not isinstance(owners, list):
raise TypeError("Expected argument 'owners' to be a list")
pulumi.set(__self__, "owners", owners)
if type and not isinstance(type, str):
raise TypeError("Expected argument 'type' to be a str")
pulumi.set(__self__, "type", type)
@property
@pulumi.getter
def id(self) -> str:
"""
The resource ID of the lock.
"""
return pulumi.get(self, "id")
@property
@pulumi.getter
def level(self) -> str:
"""
The level of the lock. Possible values are: NotSpecified, CanNotDelete, ReadOnly. CanNotDelete means authorized users are able to read and modify the resources, but not delete. ReadOnly means authorized users can only read from a resource, but they can't modify or delete it.
"""
return pulumi.get(self, "level")
@property
@pulumi.getter
def name(self) -> str:
"""
The name of the lock.
"""
return pulumi.get(self, "name")
@property
@pulumi.getter
def notes(self) -> Optional[str]:
"""
Notes about the lock. Maximum of 512 characters.
"""
return pulumi.get(self, "notes")
@property
@pulumi.getter
def owners(self) -> Optional[Sequence['outputs.ManagementLockOwnerResponse']]:
"""
The owners of the lock.
"""
return pulumi.get(self, "owners")
@property
@pulumi.getter
def type(self) -> str:
"""
The resource type of the lock - Microsoft.Authorization/locks.
"""
return pulumi.get(self, "type")
class AwaitableGetManagementLockAtResourceLevelResult(GetManagementLockAtResourceLevelResult):
# pylint: disable=using-constant-test
def __await__(self):
if False:
yield self
return GetManagementLockAtResourceLevelResult(
id=self.id,
level=self.level,
name=self.name,
notes=self.notes,
owners=self.owners,
type=self.type)
def get_management_lock_at_resource_level(lock_name: Optional[str] = None,
parent_resource_path: Optional[str] = None,
resource_group_name: Optional[str] = None,
resource_name: Optional[str] = None,
resource_provider_namespace: Optional[str] = None,
resource_type: Optional[str] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetManagementLockAtResourceLevelResult:
"""
The lock information.
Latest API Version: 2016-09-01.
:param str lock_name: The name of lock.
:param str parent_resource_path: An extra path parameter needed in some services, like SQL Databases.
:param str resource_group_name: The name of the resource group.
:param str resource_name: The name of the resource.
:param str resource_provider_namespace: The namespace of the resource provider.
:param str resource_type: The type of the resource.
"""
pulumi.log.warn("""get_management_lock_at_resource_level is deprecated: The 'latest' version is deprecated. Please migrate to the function in the top-level module: 'azure-native:authorization:getManagementLockAtResourceLevel'.""")
__args__ = dict()
__args__['lockName'] = lock_name
__args__['parentResourcePath'] = parent_resource_path
__args__['resourceGroupName'] = resource_group_name
__args__['resourceName'] = resource_name
__args__['resourceProviderNamespace'] = resource_provider_namespace
__args__['resourceType'] = resource_type
if opts is None:
opts = pulumi.InvokeOptions()
if opts.version is None:
opts.version = _utilities.get_version()
__ret__ = pulumi.runtime.invoke('azure-native:authorization/latest:getManagementLockAtResourceLevel', __args__, opts=opts, typ=GetManagementLockAtResourceLevelResult).value
return AwaitableGetManagementLockAtResourceLevelResult(
id=__ret__.id,
level=__ret__.level,
name=__ret__.name,
notes=__ret__.notes,
owners=__ret__.owners,
type=__ret__.type)
| [
[
[
176,
184
],
[
500,
508
]
],
[
[
192,
198
]
],
[
[
206,
220
],
[
698,
704
],
[
1881,
1887
],
[
2039,
2045
],
[
2450,
2456
],
[
2605,
2611
],
[
2799,
2805
],
[
3014,
3020
],
[
1018,
1024
],
[
1175,
1181
],
[
1335,
1341
],
[
1496,
1502
],
[
1664,
1670
],
[
1826,
1832
],
[
1996,
2002
],
[
2404,
2410
],
[
2560,
2566
],
[
2753,
2759
],
[
2967,
2973
],
[
3165,
3171
],
[
4178,
4184
],
[
4759,
4765
],
[
5361,
5367
],
[
5475,
5481
]
],
[
[
240,
243
]
],
[
[
245,
252
]
],
[
[
254,
262
],
[
2642,
2650
],
[
2837,
2845
],
[
3677,
3685
],
[
3763,
3771
],
[
3848,
3856
],
[
3927,
3935
],
[
4020,
4028
],
[
4099,
4107
],
[
4169,
4177
]
],
[
[
264,
272
],
[
2846,
2854
]
],
[
[
274,
279
]
],
[
[
296,
306
],
[
5436,
5446
]
],
[
[
308,
315
]
],
[
[
330,
337
]
],
[
[
339,
346
]
],
[
[
723,
761
],
[
3246,
3284
],
[
3410,
3448
],
[
5592,
5630
]
],
[
[
3198,
3245
],
[
4211,
4258
],
[
5650,
5697
]
],
[
[
3628,
3665
]
]
] |
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from mvn.utils.img import to_numpy, to_torch
from mvn.utils import multiview
def integrate_tensor_2d(heatmaps, softmax=True):
"""Applies softmax to heatmaps and integrates them to get their's "center of masses"
Args:
heatmaps torch tensor of shape (batch_size, n_heatmaps, h, w): input heatmaps
Returns:
coordinates torch tensor of shape (batch_size, n_heatmaps, 2): coordinates of center of masses of all heatmaps
"""
batch_size, n_heatmaps, h, w = heatmaps.shape
heatmaps = heatmaps.reshape((batch_size, n_heatmaps, -1))
if softmax:
heatmaps = nn.functional.softmax(heatmaps, dim=2)
else:
heatmaps = nn.functional.relu(heatmaps)
heatmaps = heatmaps.reshape((batch_size, n_heatmaps, h, w))
mass_x = heatmaps.sum(dim=2)
mass_y = heatmaps.sum(dim=3)
mass_times_coord_x = mass_x * torch.arange(w).type(torch.float).to(mass_x.device)
mass_times_coord_y = mass_y * torch.arange(h).type(torch.float).to(mass_y.device)
x = mass_times_coord_x.sum(dim=2, keepdim=True)
y = mass_times_coord_y.sum(dim=2, keepdim=True)
if not softmax:
x = x / mass_x.sum(dim=2, keepdim=True)
y = y / mass_y.sum(dim=2, keepdim=True)
coordinates = torch.cat((x, y), dim=2)
coordinates = coordinates.reshape((batch_size, n_heatmaps, 2))
return coordinates
def integrate_tensor_3d(volumes, softmax=True):
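    # soft-argmax over 3D volumes: returns per-joint (x, y, z) centers of mass together with the processed volumes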
batch_size, n_volumes, x_size, y_size, z_size = volumes.shape
volumes = volumes.reshape((batch_size, n_volumes, -1))
if softmax:
volumes = nn.functional.softmax(volumes, dim=2)
else:
volumes = nn.functional.relu(volumes)
volumes = volumes.reshape((batch_size, n_volumes, x_size, y_size, z_size))
mass_x = volumes.sum(dim=3).sum(dim=3)
mass_y = volumes.sum(dim=2).sum(dim=3)
mass_z = volumes.sum(dim=2).sum(dim=2)
mass_times_coord_x = mass_x * torch.arange(x_size).type(torch.float).to(mass_x.device)
mass_times_coord_y = mass_y * torch.arange(y_size).type(torch.float).to(mass_y.device)
mass_times_coord_z = mass_z * torch.arange(z_size).type(torch.float).to(mass_z.device)
x = mass_times_coord_x.sum(dim=2, keepdim=True)
y = mass_times_coord_y.sum(dim=2, keepdim=True)
z = mass_times_coord_z.sum(dim=2, keepdim=True)
if not softmax:
x = x / mass_x.sum(dim=2, keepdim=True)
y = y / mass_y.sum(dim=2, keepdim=True)
z = z / mass_z.sum(dim=2, keepdim=True)
coordinates = torch.cat((x, y, z), dim=2)
coordinates = coordinates.reshape((batch_size, n_volumes, 3))
return coordinates, volumes
def integrate_tensor_3d_with_coordinates(volumes, coord_volumes, softmax=True):
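    # like integrate_tensor_3d, but integrates against explicit per-voxel coordinates given in coord_volumes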
batch_size, n_volumes, x_size, y_size, z_size = volumes.shape
volumes = volumes.reshape((batch_size, n_volumes, -1))
if softmax:
volumes = nn.functional.softmax(volumes, dim=2)
else:
volumes = nn.functional.relu(volumes)
volumes = volumes.reshape((batch_size, n_volumes, x_size, y_size, z_size))
coordinates = torch.einsum("bnxyz, bxyzc -> bnc", volumes, coord_volumes)
return coordinates #, volumes
def unproject_heatmaps(heatmaps, proj_matricies, coord_volumes, volume_aggregation_method='sum', vol_confidences=None):
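    # back-project each view's 2D heatmaps into a shared 3D volume via the projection matrices, then aggregate across views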
device = heatmaps.device
batch_size, n_views, n_joints, heatmap_shape = heatmaps.shape[0], heatmaps.shape[1], heatmaps.shape[2], tuple(heatmaps.shape[3:]) # 1,4,32,96x96
volume_shape = coord_volumes.shape[1:4] #64x64x64
volume_batch = torch.zeros(batch_size, n_joints, *volume_shape, device=device) # tensor of shape 1x32x64x64x64
# TODO: speed up this loop
for batch_i in range(batch_size):
coord_volume = coord_volumes[batch_i] # Bx64x64x64x3 -> 64x64x64x3
grid_coord = coord_volume.reshape((-1, 3)) # 262144x3
volume_batch_to_aggregate = torch.zeros(n_views, n_joints, *volume_shape, device=device) # 4x32x64x64x64
for view_i in range(n_views):
heatmap = heatmaps[batch_i, view_i] # 1x4x32x96x96 -> 32x96x96
heatmap = heatmap.unsqueeze(0) # 1x32x96x96 (adds a dimension at the front)
grid_coord_proj = multiview.project_3d_points_to_image_plane_without_distortion( # 262144x3
proj_matricies[batch_i, view_i], grid_coord, convert_back_to_euclidean=False
)
invalid_mask = grid_coord_proj[:, 2] <= 0.0 # depth must be larger than 0.0 # can happen when a person gets too close to the camera??
grid_coord_proj[grid_coord_proj[:, 2] == 0.0, 2] = 1.0 # not to divide by zero
grid_coord_proj = multiview.homogeneous_to_euclidean(grid_coord_proj)
# transform to [-1.0, 1.0] range
grid_coord_proj_transformed = torch.zeros_like(grid_coord_proj) # 262144x2
grid_coord_proj_transformed[:, 0] = 2 * (grid_coord_proj[:, 0] / heatmap_shape[0] - 0.5) # converts coordinates in the (0,0)->(96,96) range to relative coordinates with center (0,0), top-left (-1,-1), bottom-right (1,1)
grid_coord_proj_transformed[:, 1] = 2 * (grid_coord_proj[:, 1] / heatmap_shape[1] - 0.5)
grid_coord_proj = grid_coord_proj_transformed
# prepare to F.grid_sample
grid_coord_proj = grid_coord_proj.unsqueeze(1).unsqueeze(0) # unsqueeze adds one dimension at the given position: 1x262144x1x2; heatmap is 1x32x96x96
try:
current_volume = F.grid_sample(heatmap, grid_coord_proj, align_corners=True) # 1x32x262144x1 = Heatmap(1x32x96x96), grid_coord_proj(1x262144x1x2)
except TypeError: # old PyTorch
current_volume = F.grid_sample(heatmap, grid_coord_proj)
# zero out non-valid points
current_volume = current_volume.view(n_joints, -1) #32x262144
current_volume[:, invalid_mask] = 0.0
# reshape back to volume
current_volume = current_volume.view(n_joints, *volume_shape) #32x64x64x64
# collect
volume_batch_to_aggregate[view_i] = current_volume
# aggregate resulting volume
if volume_aggregation_method.startswith('conf'):
volume_batch[batch_i] = (volume_batch_to_aggregate * vol_confidences[batch_i].view(n_views, n_joints, 1, 1, 1)).sum(0)
elif volume_aggregation_method == 'sum':
volume_batch[batch_i] = volume_batch_to_aggregate.sum(0)
elif volume_aggregation_method == 'max':
volume_batch[batch_i] = volume_batch_to_aggregate.max(0)[0]
elif volume_aggregation_method == 'softmax':
volume_batch_to_aggregate_softmin = volume_batch_to_aggregate.clone() # 2x32x64x64x64(n_views, n_joints, *volume_shape)
volume_batch_to_aggregate_softmin = volume_batch_to_aggregate_softmin.view(n_views, -1) # reshape
volume_batch_to_aggregate_softmin = nn.functional.softmax(volume_batch_to_aggregate_softmin, dim=0)
volume_batch_to_aggregate_softmin = volume_batch_to_aggregate_softmin.view(n_views, n_joints, *volume_shape) #reshape back
volume_batch[batch_i] = (volume_batch_to_aggregate * volume_batch_to_aggregate_softmin).sum(0)
else:
raise ValueError("Unknown volume_aggregation_method: {}".format(volume_aggregation_method))
return volume_batch
def gaussian_2d_pdf(coords, means, sigmas, normalize=True):
normalization = 1.0
if normalize:
normalization = (2 * np.pi * sigmas[:, 0] * sigmas[:, 0])
exp = torch.exp(-((coords[:, 0] - means[:, 0]) ** 2 / sigmas[:, 0] ** 2 + (coords[:, 1] - means[:, 1]) ** 2 / sigmas[:, 1] ** 2) / 2)
return exp / normalization
def render_points_as_2d_gaussians(points, sigmas, image_shape, normalize=True):
device = points.device
n_points = points.shape[0]
yy, xx = torch.meshgrid(torch.arange(image_shape[0]).to(device), torch.arange(image_shape[1]).to(device))
grid = torch.stack([xx, yy], dim=-1).type(torch.float32)
grid = grid.unsqueeze(0).repeat(n_points, 1, 1, 1) # (n_points, h, w, 2)
grid = grid.reshape((-1, 2))
points = points.unsqueeze(1).unsqueeze(1).repeat(1, image_shape[0], image_shape[1], 1)
points = points.reshape(-1, 2)
sigmas = sigmas.unsqueeze(1).unsqueeze(1).repeat(1, image_shape[0], image_shape[1], 1)
sigmas = sigmas.reshape(-1, 2)
images = gaussian_2d_pdf(grid, points, sigmas, normalize=normalize)
images = images.reshape(n_points, *image_shape)
return images
| [
[
[
7,
18
],
[
7390,
7392
]
],
[
[
27,
32
],
[
956,
961
],
[
977,
982
],
[
1042,
1047
],
[
1063,
1068
],
[
1335,
1340
],
[
2000,
2005
],
[
2026,
2031
],
[
2091,
2096
],
[
2117,
2122
],
[
2182,
2187
],
[
2208,
2213
],
[
2580,
2585
],
[
3141,
3146
],
[
3610,
3615
],
[
3946,
3951
],
[
4793,
4798
],
[
7438,
7443
],
[
7751,
7756
],
[
7766,
7771
],
[
7807,
7812
],
[
7859,
7864
],
[
7894,
7899
]
],
[
[
40,
54
],
[
692,
694
],
[
760,
762
],
[
1661,
1663
],
[
1727,
1729
],
[
2949,
2951
],
[
3015,
3017
],
[
6807,
6809
]
],
[
[
62,
86
],
[
5378,
5379
],
[
5584,
5585
]
],
[
[
114,
122
]
],
[
[
124,
132
]
],
[
[
155,
164
],
[
4237,
4246
],
[
4653,
4662
]
],
[
[
171,
190
]
],
[
[
1457,
1476
]
],
[
[
2713,
2749
]
],
[
[
3242,
3260
]
],
[
[
7263,
7278
],
[
8288,
8303
]
],
[
[
7603,
7632
]
]
] |
from calendar import timegm
from datetime import date, datetime, time
import sqlite3
from typing import Callable
import julian # type: ignore
def store_time(time_type: str, time_format: str = "") -> None:
if time_type == "seconds":
sqlite3.register_adapter(time, time_to_seconds)
elif time_type == "text":
sqlite3.register_adapter(time, time_to_text(time_format))
else:
raise ValueError(f"Unknown time adapter: '{time_type}'")
def store_date(date_type: str, date_format: str = "") -> None:
if date_type == "julian":
sqlite3.register_adapter(date, date_to_julian)
elif date_type == "posix":
sqlite3.register_adapter(date, date_to_posix)
elif date_type == "text":
sqlite3.register_adapter(date, date_to_text(date_format))
else:
raise ValueError(f"Unknown date adapter: '{date_type}'")
def store_datetime(datetime_type: str, datetime_format: str = "") -> None:
if datetime_type == "julian":
sqlite3.register_adapter(datetime, datetime_to_julian)
elif datetime_type == "posix":
sqlite3.register_adapter(datetime, datetime_to_posix)
elif datetime_type == "text":
sqlite3.register_adapter(datetime, datetime_to_text(datetime_format))
else:
raise ValueError(f"Unknown datetime adapter: '{datetime_type}'")
def time_to_seconds(t: time) -> float:
return (60 * 60 * t.hour) + (60 * t.minute) + t.second + t.microsecond
def time_to_text(format: str) -> Callable[[time], str]:
def _time_to_text(t: time) -> str:
return t.strftime(format)
return _time_to_text
def date_to_posix(d: date) -> int:
return datetime_to_posix(datetime(d.year, d.month, d.day))
def date_to_julian(d: date) -> float:
return datetime_to_julian(datetime(d.year, d.month, d.day))
def date_to_text(format: str) -> Callable[[date], str]:
def _date_to_text(d: date) -> str:
return d.strftime(format)
return _date_to_text
def datetime_to_posix(dt: datetime) -> int:
return timegm(dt.utctimetuple())
def datetime_to_julian(dt: datetime) -> float:
return float(julian.to_jd(dt))
def datetime_to_text(format: str) -> Callable[[datetime], str]:
def _datetime_to_text(dt: datetime) -> str:
return dt.strftime(format)
return _datetime_to_text
| [
[
[
21,
27
],
[
2030,
2036
]
],
[
[
49,
53
],
[
595,
599
],
[
681,
685
],
[
765,
769
],
[
1635,
1639
],
[
1736,
1740
],
[
1861,
1865
],
[
1899,
1903
]
],
[
[
55,
63
],
[
1017,
1025
],
[
1115,
1123
],
[
1211,
1219
],
[
1678,
1686
],
[
1782,
1790
],
[
2001,
2009
],
[
2085,
2093
],
[
2189,
2197
],
[
2236,
2244
]
],
[
[
65,
69
],
[
273,
277
],
[
359,
363
],
[
1364,
1368
],
[
1500,
1504
],
[
1538,
1542
]
],
[
[
77,
84
],
[
248,
255
],
[
334,
341
],
[
570,
577
],
[
656,
663
],
[
740,
747
],
[
992,
999
],
[
1090,
1097
],
[
1186,
1193
]
],
[
[
104,
112
],
[
1490,
1498
],
[
1851,
1859
],
[
2179,
2187
]
],
[
[
121,
127
],
[
2122,
2128
]
],
[
[
150,
160
]
],
[
[
473,
483
]
],
[
[
879,
893
]
],
[
[
1345,
1360
],
[
279,
294
]
],
[
[
1461,
1473
],
[
365,
377
]
],
[
[
1618,
1631
],
[
687,
700
]
],
[
[
1718,
1732
],
[
601,
615
]
],
[
[
1822,
1834
],
[
771,
783
]
],
[
[
1979,
1996
],
[
1125,
1142
],
[
1660,
1677
]
],
[
[
2062,
2080
],
[
1027,
1045
],
[
1763,
1781
]
],
[
[
2146,
2162
],
[
1221,
1237
]
]
] |
import click
import logging
from .constants import WELCOME_TEXT
from .api import run_server
from .logger import OrigamiLogger
logger = OrigamiLogger(
file_log_level=logging.DEBUG, console_log_level=logging.DEBUG)
@click.group(invoke_without_command=True)
@click.pass_context
def main(ctx):
"""
Origami daemon is an application which deploys and manages demos on
CloudCV servers.
"""
if not ctx.invoked_subcommand:
click.echo(WELCOME_TEXT)
main.add_command(run_server)
| [
[
[
7,
12
],
[
222,
227
],
[
264,
269
],
[
450,
455
]
],
[
[
20,
27
],
[
171,
178
],
[
204,
211
]
],
[
[
52,
64
],
[
461,
473
]
],
[
[
82,
92
],
[
494,
504
]
],
[
[
113,
126
],
[
137,
150
]
],
[
[
128,
134
]
],
[
[
287,
291
],
[
477,
481
]
]
] |
from __future__ import annotations
import itertools
import math
from dataclasses import dataclass
from typing import Any
@dataclass
class TreeZipper:
inner: Any
path: list[int]
def up(self):
if self.path:
return TreeZipper(self.inner, self.path[:-1]), self.path[-1]
return None
def get(self):
v = self.inner
for p in self.path:
v = v[p]
return v
def set(self, x):
v = self.inner
for p in self.path[:-1]:
v = v[p]
v[self.path[-1]] = x
def try_left(self):
v = self.get()
if isinstance(v, list):
return TreeZipper(self.inner, self.path + [0])
return None
def try_right(self):
v = self.get()
if isinstance(v, list):
return TreeZipper(self.inner, self.path + [1])
return None
class Whoop(Exception):
pass
def do_reduce_exp(v: TreeZipper, depth):
if depth == 4 and isinstance(v.get(), list):
# print("exploding")
l, r = v.get()
v.set(0)
l_v = v
came_from_left = False
dont_go = False
while True:
# print("left", l_v, l_v.get())
if (l_v_n := l_v.try_left()) != None and not came_from_left:
l_v = l_v_n
break
elif (l_v_n_v := l_v.up()) != None:
# if we can go up and didn't already go left, do so
l_v = l_v_n_v[0]
came_from_left = l_v_n_v[1] == 0
else:
dont_go = True
# if we did nothing, we have to have reached the top and we were already from the left
break
if not dont_go:
while True:
if (l_v_n := l_v.try_right()) != None:
l_v = l_v_n
# try to go down and to the right
if isinstance(l_v.get(), int):
# if it's an int, add and quit
l_v.set(l_v.get() + l)
break
l_v = v
came_from_right = False
dont_go = False
while True:
# print("right", l_v, l_v.get())
if (l_v_n := l_v.try_right()) != None and not came_from_right:
l_v = l_v_n
break
elif (l_v_n_v := l_v.up()) != None:
# if we can go up and didn't already go right, do so
l_v = l_v_n_v[0]
came_from_right = l_v_n_v[1] == 1
else:
# if we did nothing, we have to have reached the top, bail
dont_go = True
break
if not dont_go:
while True:
if (l_v_n := l_v.try_left()) != None:
l_v = l_v_n
# try to go down and to the left
if isinstance(l_v.get(), int):
# if it's an int, add and quit
l_v.set(l_v.get() + r)
break
raise Whoop()
if (l_v := v.try_left()) != None:
do_reduce_exp(l_v, depth + 1)
if (r_v := v.try_right()) != None:
do_reduce_exp(r_v, depth + 1)
def do_reduce_splt(v: TreeZipper):
n_v = v.get()
if isinstance(n_v, int):
if n_v >= 10:
# print("splitting")
l_v = math.floor(n_v / 2)
r_v = math.ceil(n_v / 2)
v.set([l_v, r_v])
raise Whoop()
# otherwise, go and reduce both sides
if (l_v := v.try_left()) != None:
do_reduce_splt(l_v)
if (r_v := v.try_right()) != None:
do_reduce_splt(r_v)
def iter_red(l):
# print("doing", l)
while True:
t = TreeZipper(l, [])
try:
# print(l)
do_reduce_exp(t, 0)
do_reduce_splt(t)
except Whoop:
pass
else:
print("did nothing")
return
def do_mag(v: TreeZipper):
if isinstance(v.get(), int):
return v.get()
return 3 * do_mag(v.try_left()) + 2 * do_mag(v.try_right())
inp = [
[[[[7,1],[0,0]],[6,[8,2]]],[8,[3,8]]],
[[[3,6],[9,4]],[[[5,9],5],[8,0]]],
[[[2,2],2],[1,[[1,6],7]]],
[[[[0,9],7],[[3,2],8]],[6,[7,9]]],
[[[[4,1],6],[[7,6],[2,2]]],[[[1,1],9],4]],
[[[8,[3,7]],3],[[4,4],[[9,1],[3,5]]]],
[[4,[8,2]],[1,[0,5]]],
[8,[8,7]],
[[[[2,2],7],[3,[4,5]]],[[4,6],[[2,5],4]]],
[[[5,5],[[5,1],3]],[[2,[8,2]],[[6,9],[1,5]]]],
[0,7],
[[[[5,1],3],[8,[5,3]]],7],
[[5,[2,[0,6]]],[[[5,5],2],[9,[8,0]]]],
[[[[3,4],2],0],4],
[[[[5,3],[2,7]],6],[[4,0],[9,[7,2]]]],
[[[3,[2,5]],[3,3]],7],
[[[[5,1],1],[4,8]],[[5,[8,3]],2]],
[[4,[[8,1],[8,5]]],[[[4,1],0],6]],
[[[5,5],[5,9]],[0,[[6,8],[0,1]]]],
[4,[[[7,9],4],0]],
[[[[0,1],7],[[3,6],5]],[8,[5,[6,1]]]],
[[[7,7],[8,0]],[6,[8,[7,9]]]],
[[[9,2],1],6],
[[[4,4],[2,[5,0]]],[[[2,6],6],[5,[4,3]]]],
[[2,[[4,7],5]],1],
[[8,7],[[[2,0],7],[1,[0,3]]]],
[[9,[[9,3],[9,5]]],[[8,7],[[4,1],[6,5]]]],
[[3,4],[[9,4],5]],
[[5,[[8,3],5]],1],
[[0,[[9,0],[3,2]]],[2,[7,[5,1]]]],
[[9,[[9,5],[8,6]]],[[4,4],[[3,8],[1,6]]]],
[[[1,[5,2]],9],[[4,6],[3,[8,0]]]],
[[1,7],[[1,7],9]],
[[[[3,4],3],[[7,5],[9,1]]],[[[5,0],[3,0]],[[7,9],6]]],
[[[7,2],[[1,0],[5,6]]],[[[3,7],[8,9]],6]],
[[[[1,1],1],[[8,6],[9,8]]],[[[1,8],4],[8,9]]],
[[[8,9],0],3],
[[[1,7],[1,[3,9]]],[6,[0,[8,5]]]],
[[0,5],[6,5]],
[[[[6,8],[4,5]],[[7,4],6]],[[3,6],5]],
[[8,[[0,9],8]],[9,[7,[7,9]]]],
[0,[[[7,1],2],[[0,4],4]]],
[[0,[[9,1],5]],[1,4]],
[3,4],
[[[9,3],[1,3]],[[[4,8],3],[[1,3],[9,0]]]],
[[[[5,1],7],[[9,2],8]],[[[6,8],[5,4]],[0,1]]],
[8,[[1,[3,0]],[[7,9],4]]],
[[[6,4],[[2,9],[9,0]]],[7,[[0,0],3]]],
[[3,[[9,6],6]],2],
[[5,[[3,1],[7,5]]],[[[6,7],9],[[4,6],[5,2]]]],
[[[4,[6,5]],8],[[6,[8,0]],[[9,3],3]]],
[[[[4,9],[2,8]],9],[[[5,0],0],[[3,4],[2,8]]]],
[[3,[7,1]],[9,[[1,8],7]]],
[[9,1],[0,[[0,7],[7,1]]]],
[[7,[0,[7,6]]],[[[5,3],1],[6,[4,5]]]],
[8,[[[2,1],[6,9]],[[3,3],[4,6]]]],
[0,[7,[3,0]]],
[[[[1,6],3],[5,[8,0]]],[[[6,6],7],1]],
[[[7,[8,3]],3],[[[2,8],5],[0,[9,5]]]],
[[[[5,1],4],[[1,2],1]],7],
[[[3,[7,5]],7],3],
[[9,[6,[1,1]]],[[[4,1],[2,2]],[[9,5],[7,7]]]],
[2,7],
[[[9,[8,6]],[[9,0],[6,5]]],[[[6,7],5],[[7,7],[2,3]]]],
[[[0,[6,4]],2],[4,[7,[7,5]]]],
[[[[6,1],[9,1]],[[6,1],9]],[[2,6],0]],
[[0,[[1,8],[3,5]]],[4,[[8,2],[4,2]]]],
[[[[9,3],[4,2]],2],[[[2,1],[7,1]],[4,8]]],
[[[3,[0,2]],3],8],
[[[4,[4,9]],9],[[[4,4],5],9]],
[[[[8,2],7],9],[[[1,0],[3,8]],[[7,7],0]]],
[[[3,2],[9,7]],[[9,[8,2]],[[5,5],3]]],
[[[7,[3,1]],[[8,3],1]],[[[8,6],[7,0]],4]],
[[9,[[9,1],5]],[[4,[1,1]],2]],
[[[[7,4],[0,3]],7],[8,[6,[3,3]]]],
[5,5],
[[6,7],[1,[7,[8,1]]]],
[[1,[0,4]],7],
[[[4,0],[[0,1],[2,2]]],[9,[[9,9],[3,0]]]],
[[[6,0],[[8,6],3]],[[5,1],[[8,1],[2,7]]]],
[[[[8,3],7],5],[9,[[5,1],8]]],
[[[[4,0],[5,2]],[[0,0],7]],2],
[[[[0,1],6],2],[[8,2],6]],
[[[[2,4],1],[[6,7],9]],[[[1,6],9],3]],
[[5,5],[[8,[7,7]],[5,8]]],
[[6,[[9,2],[9,7]]],[[[8,5],[4,4]],7]],
[[[9,[7,7]],[6,0]],[7,[[8,7],[1,2]]]],
[[7,[6,2]],[[9,[5,2]],[1,4]]],
[[[7,[5,9]],[[3,9],[4,5]]],[0,6]],
[[9,[8,[2,2]]],[[9,7],[1,1]]],
[[[[2,3],4],[[4,8],9]],[[9,[8,6]],[[0,9],0]]],
[[0,[[9,3],0]],[8,8]],
[[[[2,9],6],[[2,8],9]],[[[0,5],6],[[6,1],7]]],
[[9,[[8,3],[5,8]]],[[7,[3,0]],3]],
[[[4,[4,2]],0],1],
[[[[9,6],[5,8]],[6,2]],[[[8,0],[7,0]],[[5,6],4]]],
[[[8,0],[[4,3],[7,4]]],[[3,[7,9]],[[7,3],6]]],
[[3,[5,[0,3]]],[5,4]],
[[[[1,2],[6,3]],1],[[7,[5,2]],[[8,8],7]]],
[[4,[[8,0],[7,1]]],[[8,[8,0]],[[1,5],3]]]
]
inp = [
[[[0,[5,8]],[[1,7],[9,6]]],[[4,[1,2]],[[1,4],2]]],
[[[5,[2,8]],4],[5,[[9,9],0]]],
[6,[[[6,2],[5,6]],[[7,6],[4,7]]]],
[[[6,[0,7]],[0,9]],[4,[9,[9,0]]]],
[[[7,[6,4]],[3,[1,3]]],[[[5,5],1],9]],
[[6,[[7,3],[3,2]]],[[[3,8],[5,7]],4]],
[[[[5,4],[7,7]],8],[[8,3],8]],
[[9,3],[[9,9],[6,[4,9]]]],
[[2,[[7,7],7]],[[5,8],[[9,3],[0,2]]]],
[[[[5,2],5],[8,[3,7]]],[[5,[7,5]],[4,4]]]
]
# inp = [
# [[[[[7,0],[7,7]],[[7,7],[7,8]]],[[[7,7],[8,8]],[[7,7],[8,7]]]],[7,[5,[[3,8],[1,4]]]]]
# ]
def do_add(l):
it = iter(l)
x = next(it)
iter_red(x)
for y in it:
x = [x, y]
iter_red(x)
return x
out = do_add(inp)
print(out)
print(do_mag(TreeZipper(out, [])))
import copy
inp = [
[[[[7,1],[0,0]],[6,[8,2]]],[8,[3,8]]],
[[[3,6],[9,4]],[[[5,9],5],[8,0]]],
[[[2,2],2],[1,[[1,6],7]]],
[[[[0,9],7],[[3,2],8]],[6,[7,9]]],
[[[[4,1],6],[[7,6],[2,2]]],[[[1,1],9],4]],
[[[8,[3,7]],3],[[4,4],[[9,1],[3,5]]]],
[[4,[8,2]],[1,[0,5]]],
[8,[8,7]],
[[[[2,2],7],[3,[4,5]]],[[4,6],[[2,5],4]]],
[[[5,5],[[5,1],3]],[[2,[8,2]],[[6,9],[1,5]]]],
[0,7],
[[[[5,1],3],[8,[5,3]]],7],
[[5,[2,[0,6]]],[[[5,5],2],[9,[8,0]]]],
[[[[3,4],2],0],4],
[[[[5,3],[2,7]],6],[[4,0],[9,[7,2]]]],
[[[3,[2,5]],[3,3]],7],
[[[[5,1],1],[4,8]],[[5,[8,3]],2]],
[[4,[[8,1],[8,5]]],[[[4,1],0],6]],
[[[5,5],[5,9]],[0,[[6,8],[0,1]]]],
[4,[[[7,9],4],0]],
[[[[0,1],7],[[3,6],5]],[8,[5,[6,1]]]],
[[[7,7],[8,0]],[6,[8,[7,9]]]],
[[[9,2],1],6],
[[[4,4],[2,[5,0]]],[[[2,6],6],[5,[4,3]]]],
[[2,[[4,7],5]],1],
[[8,7],[[[2,0],7],[1,[0,3]]]],
[[9,[[9,3],[9,5]]],[[8,7],[[4,1],[6,5]]]],
[[3,4],[[9,4],5]],
[[5,[[8,3],5]],1],
[[0,[[9,0],[3,2]]],[2,[7,[5,1]]]],
[[9,[[9,5],[8,6]]],[[4,4],[[3,8],[1,6]]]],
[[[1,[5,2]],9],[[4,6],[3,[8,0]]]],
[[1,7],[[1,7],9]],
[[[[3,4],3],[[7,5],[9,1]]],[[[5,0],[3,0]],[[7,9],6]]],
[[[7,2],[[1,0],[5,6]]],[[[3,7],[8,9]],6]],
[[[[1,1],1],[[8,6],[9,8]]],[[[1,8],4],[8,9]]],
[[[8,9],0],3],
[[[1,7],[1,[3,9]]],[6,[0,[8,5]]]],
[[0,5],[6,5]],
[[[[6,8],[4,5]],[[7,4],6]],[[3,6],5]],
[[8,[[0,9],8]],[9,[7,[7,9]]]],
[0,[[[7,1],2],[[0,4],4]]],
[[0,[[9,1],5]],[1,4]],
[3,4],
[[[9,3],[1,3]],[[[4,8],3],[[1,3],[9,0]]]],
[[[[5,1],7],[[9,2],8]],[[[6,8],[5,4]],[0,1]]],
[8,[[1,[3,0]],[[7,9],4]]],
[[[6,4],[[2,9],[9,0]]],[7,[[0,0],3]]],
[[3,[[9,6],6]],2],
[[5,[[3,1],[7,5]]],[[[6,7],9],[[4,6],[5,2]]]],
[[[4,[6,5]],8],[[6,[8,0]],[[9,3],3]]],
[[[[4,9],[2,8]],9],[[[5,0],0],[[3,4],[2,8]]]],
[[3,[7,1]],[9,[[1,8],7]]],
[[9,1],[0,[[0,7],[7,1]]]],
[[7,[0,[7,6]]],[[[5,3],1],[6,[4,5]]]],
[8,[[[2,1],[6,9]],[[3,3],[4,6]]]],
[0,[7,[3,0]]],
[[[[1,6],3],[5,[8,0]]],[[[6,6],7],1]],
[[[7,[8,3]],3],[[[2,8],5],[0,[9,5]]]],
[[[[5,1],4],[[1,2],1]],7],
[[[3,[7,5]],7],3],
[[9,[6,[1,1]]],[[[4,1],[2,2]],[[9,5],[7,7]]]],
[2,7],
[[[9,[8,6]],[[9,0],[6,5]]],[[[6,7],5],[[7,7],[2,3]]]],
[[[0,[6,4]],2],[4,[7,[7,5]]]],
[[[[6,1],[9,1]],[[6,1],9]],[[2,6],0]],
[[0,[[1,8],[3,5]]],[4,[[8,2],[4,2]]]],
[[[[9,3],[4,2]],2],[[[2,1],[7,1]],[4,8]]],
[[[3,[0,2]],3],8],
[[[4,[4,9]],9],[[[4,4],5],9]],
[[[[8,2],7],9],[[[1,0],[3,8]],[[7,7],0]]],
[[[3,2],[9,7]],[[9,[8,2]],[[5,5],3]]],
[[[7,[3,1]],[[8,3],1]],[[[8,6],[7,0]],4]],
[[9,[[9,1],5]],[[4,[1,1]],2]],
[[[[7,4],[0,3]],7],[8,[6,[3,3]]]],
[5,5],
[[6,7],[1,[7,[8,1]]]],
[[1,[0,4]],7],
[[[4,0],[[0,1],[2,2]]],[9,[[9,9],[3,0]]]],
[[[6,0],[[8,6],3]],[[5,1],[[8,1],[2,7]]]],
[[[[8,3],7],5],[9,[[5,1],8]]],
[[[[4,0],[5,2]],[[0,0],7]],2],
[[[[0,1],6],2],[[8,2],6]],
[[[[2,4],1],[[6,7],9]],[[[1,6],9],3]],
[[5,5],[[8,[7,7]],[5,8]]],
[[6,[[9,2],[9,7]]],[[[8,5],[4,4]],7]],
[[[9,[7,7]],[6,0]],[7,[[8,7],[1,2]]]],
[[7,[6,2]],[[9,[5,2]],[1,4]]],
[[[7,[5,9]],[[3,9],[4,5]]],[0,6]],
[[9,[8,[2,2]]],[[9,7],[1,1]]],
[[[[2,3],4],[[4,8],9]],[[9,[8,6]],[[0,9],0]]],
[[0,[[9,3],0]],[8,8]],
[[[[2,9],6],[[2,8],9]],[[[0,5],6],[[6,1],7]]],
[[9,[[8,3],[5,8]]],[[7,[3,0]],3]],
[[[4,[4,2]],0],1],
[[[[9,6],[5,8]],[6,2]],[[[8,0],[7,0]],[[5,6],4]]],
[[[8,0],[[4,3],[7,4]]],[[3,[7,9]],[[7,3],6]]],
[[3,[5,[0,3]]],[5,4]],
[[[[1,2],[6,3]],1],[[7,[5,2]],[[8,8],7]]],
[[4,[[8,0],[7,1]]],[[8,[8,0]],[[1,5],3]]]
]
m_v = 0
for l, r in itertools.permutations(inp, 2):
l = copy.deepcopy(l)
r = copy.deepcopy(r)
v = [l, r]
print(f"{l=} {r=}")
do_add(v)
m_v = max(do_mag(TreeZipper(v, [])), m_v)
print(m_v)
| [
[
[
23,
34
]
],
[
[
42,
51
],
[
11330,
11339
]
],
[
[
59,
63
],
[
3329,
3333
],
[
3367,
3371
]
],
[
[
89,
98
],
[
125,
134
]
],
[
[
118,
121
],
[
164,
167
]
],
[
[
141,
151
],
[
8001,
8011
],
[
11487,
11497
],
[
248,
258
],
[
660,
670
],
[
820,
830
],
[
938,
948
],
[
3196,
3206
],
[
3689,
3699
],
[
3926,
3936
]
],
[
[
888,
893
],
[
3009,
3014
],
[
3434,
3439
],
[
3820,
3825
]
],
[
[
921,
934
],
[
3065,
3078
],
[
3143,
3156
],
[
3755,
3768
]
],
[
[
3178,
3192
],
[
3531,
3545
],
[
3599,
3613
],
[
3787,
3801
]
],
[
[
3624,
3632
],
[
7874,
7882
],
[
7931,
7939
]
],
[
[
3916,
3922
],
[
7994,
8000
],
[
11480,
11486
],
[
4011,
4017
],
[
4038,
4044
]
],
[
[
4062,
4065
]
],
[
[
7337,
7340
],
[
7972,
7975
]
],
[
[
7825,
7831
],
[
7965,
7971
],
[
11456,
11462
]
],
[
[
7959,
7962
],
[
7983,
7986
],
[
8012,
8015
]
],
[
[
8030,
8034
],
[
11370,
11374
],
[
11395,
11399
]
],
[
[
8035,
8038
],
[
11353,
11356
]
],
[
[
11310,
11313
],
[
11507,
11510
],
[
11519,
11522
]
],
[
[
11322,
11323
],
[
11384,
11385
]
],
[
[
11325,
11326
],
[
11409,
11410
]
],
[
[
11366,
11367
],
[
11422,
11423
],
[
11441,
11442
]
],
[
[
11391,
11392
],
[
11425,
11426
],
[
11446,
11447
]
],
[
[
11417,
11418
],
[
11463,
11464
],
[
11498,
11499
]
],
[
[
11470,
11473
],
[
11507,
11510
],
[
11519,
11522
]
]
] |
import json
from unittest import TestCase
from unittest.mock import Mock
from utils import protocols
from api.ontology import OntologyAPI
from utils.protocols import ONTOLOGY_3PRIME_PARENT, ONTOLOGY_5PRIME_PARENT, ONTOLOGY_CITESEQ
class TestProtocols(TestCase):
def setUp(self) -> None:
self.ontology_api = Mock()
def test_is_10x__when_equal_3prime_parent__returns_true(self):
# given
lib_prep_protocol = {
'content': {
'library_construction_method': {
'ontology': ONTOLOGY_3PRIME_PARENT
}
}
}
# when
is10x = protocols.is_10x(OntologyAPI(), lib_prep_protocol)
# then
self.assertTrue(is10x)
def test_is_10x__when_equal_5prime_parent__returns_true(self):
# given
lib_prep_protocol = {
'content': {
'library_construction_method': {
'ontology': ONTOLOGY_5PRIME_PARENT
}
}
}
# when
is10x = protocols.is_10x(self.ontology_api, lib_prep_protocol)
# then
self.assertTrue(is10x)
def test_is_10x__when_equal_citeseq__returns_true(self):
# given
lib_prep_protocol = {
'content': {
'library_construction_method': {
'ontology': ONTOLOGY_CITESEQ
}
}
}
# when
is10x = protocols.is_10x(self.ontology_api, lib_prep_protocol)
# then
self.assertTrue(is10x)
def test_is_10x__when_not_descendant__returns_false(self):
lib_prep_protocol = {
"content": {
"library_construction_method": {
"ontology": "EFO:0000000",
}
}
}
self.ontology_api.is_equal_or_descendant = Mock(return_value=False)
is10x = protocols.is_10x(self.ontology_api, lib_prep_protocol)
self.assertFalse(is10x)
def test_map_bam_schema__when_equals_citeseq__returns_10xV2(self):
# given
lib_prep_protocol = {
"content": {
"library_construction_method": {
"ontology": ONTOLOGY_CITESEQ,
}
}
}
# when
bam_schema = protocols.map_10x_bam_schema(self.ontology_api, lib_prep_protocol)
# then
self.assertEqual(bam_schema, '10xV2')
def test_map_bam_schema__when_not_leaf_term__returns_none(self):
# given
lib_prep_protocol = {
"content": {
"library_construction_method": {
"ontology": "EFO:0000000",
}
}
}
self.ontology_api.get_descendants = Mock(return_value=['descendant']) # not leaf term
self.ontology_api.search = Mock(return_value={'ontology_name': 'name', 'iri': 'iri', 'label': "10x 5' v2"})
# when
bam_schema = protocols.map_10x_bam_schema(self.ontology_api, lib_prep_protocol)
# then
self.assertEqual(bam_schema, None)
def test_map_bam_schema__when_leaf_term__returns_correct_bam_schema(self):
# given
lib_prep_protocol = {
"content": {
"library_construction_method": {
"ontology": "EFO:0000000",
}
}
}
self.ontology_api.get_descendants = Mock(return_value=[]) # leaf term
self.ontology_api.search = Mock(return_value={'ontology_name': 'name', 'iri': 'iri', 'label': "10x 5' v2"})
# when
bam_schema = protocols.map_10x_bam_schema(self.ontology_api, lib_prep_protocol)
# then
self.assertEqual(bam_schema, '10xV2')
def test_version_10x_by_label__given_label__return_version(self):
# given
lib_prep_protocol = {
"content": {
"library_construction_method": {
"ontology": "EFO:0009294",
}
}
}
self.ontology_api.search = Mock(return_value={'label': "10x 5' v2"})
# when
bam_schema = protocols.version_10x_by_label(self.ontology_api, lib_prep_protocol)
# then
self.assertEqual(bam_schema, 'V2')
def test_version_10x_by_label__given_v3_label__return_version(self):
# given
lib_prep_protocol = {
"content": {
"library_construction_method": {
"ontology": "EFO:0009294",
}
}
}
self.ontology_api.search = Mock(return_value={'label': "10x 3' v3"})
# when
bam_schema = protocols.version_10x_by_label(self.ontology_api, lib_prep_protocol)
# then
self.assertEqual(bam_schema, 'V3')
| [
[
[
7,
11
]
],
[
[
33,
41
],
[
253,
261
]
],
[
[
68,
72
],
[
321,
325
],
[
1878,
1882
],
[
2780,
2784
],
[
2865,
2869
],
[
3443,
3447
],
[
3512,
3516
],
[
4075,
4079
],
[
4598,
4602
]
],
[
[
91,
100
],
[
645,
654
],
[
1060,
1069
],
[
1467,
1476
],
[
1919,
1928
],
[
2327,
2336
],
[
2983,
2992
],
[
3630,
3639
],
[
4154,
4163
],
[
4677,
4686
]
],
[
[
126,
137
],
[
662,
673
]
],
[
[
166,
188
],
[
548,
570
]
],
[
[
190,
212
],
[
963,
985
]
],
[
[
214,
230
],
[
1376,
1392
],
[
2230,
2246
]
],
[
[
239,
252
]
]
] |
from sumy.parsers.plaintext import PlaintextParser #We're choosing a plaintext parser here, other parsers available for HTML etc.
from sumy.nlp.tokenizers import Tokenizer
from sumy.summarizers.lex_rank import LexRankSummarizer #We're choosing Lexrank, other algorithms are also built in
def get_summary(text):
# file = "plain_text.txt" #name of the plain-text file
# parser = PlaintextParser.from_file(file, Tokenizer("english"))
parser=PlaintextParser.from_string(text,Tokenizer("English"))
summarizer = LexRankSummarizer()
summary = summarizer(parser.document, 5) #Summarize the document with 5 sentences
# for sentence in summary:
# print(sentence)
return summary
| [
[
[
35,
50
],
[
454,
469
]
],
[
[
162,
171
],
[
487,
496
]
],
[
[
210,
227
],
[
526,
543
]
],
[
[
294,
305
]
]
] |
import pandas as pd
import cv2
import numpy as np
dataset_path = 'fer2013/fer2013/fer2013.csv'
image_size=(48,48)
def load_fer2013():
data = pd.read_csv(dataset_path)
pixels = data['pixels'].tolist()
width, height = 48, 48
faces = []
for pixel_sequence in pixels:
face = [int(pixel) for pixel in pixel_sequence.split(' ')]
face = np.asarray(face).reshape(width, height)
face = cv2.resize(face.astype('uint8'),image_size)
faces.append(face.astype('float32'))
faces = np.asarray(faces)
faces = np.expand_dims(faces, -1)
emotions = pd.get_dummies(data['emotion']).as_matrix()
return faces, emotions
def preprocess_input(x, v2=True):
x = x.astype('float32')
x = x / 255.0
if v2:
x = x - 0.5
x = x * 2.0
return x | [
[
[
7,
19
],
[
152,
154
],
[
644,
646
]
],
[
[
27,
30
],
[
456,
459
]
],
[
[
38,
49
],
[
397,
399
],
[
565,
567
],
[
599,
601
]
],
[
[
52,
64
],
[
164,
176
]
],
[
[
97,
107
],
[
488,
498
]
],
[
[
121,
133
]
],
[
[
724,
740
]
]
] |
import json
from time import sleep
import gspread
import requests
from gspread_formatting import *
from oauth2client.service_account import ServiceAccountCredentials
class Spreadsheet:
# comment out all but one of these depending on which spreadsheet is being used
# URL = 'https://docs.google.com/spreadsheets/d/1WhExw_ReHnyPQYXl0p-kT6jYXpZW5w8-cq2ffK7niOs' # Sample Deep Space Scouting Sheet Machine
# URL = 'https://docs.google.om/spreadsheets/d/1lOTML4TgNqv5OKUJU32keWu62__T9cFT3IL52kmPbKk' # Bethesda Week 2 Scouting Sheet Machine
# URL = 'https://docs.google.com/spreadsheets/d/1C8hjCqMZmacyUe3SlRgW4o4HGqTRFozviK4WZ6Mu4yc' # Week 0 Scouting Sheet Machine
# URL = 'https://docs.google.com/spreadsheets/d/1uYb9n_2IaGSRvOPZcuE59eUQjinaTSIN1SKqTQ6z2lQ' # Dickinson Center Week 0 Scouting Sheet Machine
# URL = 'https://docs.google.com/spreadsheets/d/1_8tFjgxjGVA0__1BLkMV-ookfPLrnGDE8gZj6pQc1_k' # Centurion-KnightKrawler Week 0 Scouting Sheet Machine
# URL = 'https://docs.google.com/spreadsheets/d/1Ftzcn5u5axYUkob1MXI8wV1KAD-8qjGkywqQjP4_AMo' # Haymarket Week 1 Scouting Sheet Machine
# URL = 'https://docs.google.com/spreadsheets/d/1fRm4nZIT457zIpW5cyZrIvR0gSGt6oEcphVYiaH6eK8' # Owings Mills Week 3 Scouting Sheet Machine
URL = 'https://docs.google.com/spreadsheets/d/1y8xtKJftg1mDbhfcmISWkyi4MgmSauveD9BY2bPNUCo/edit#gid=168604214' # CHCMP Scouting Sheet Machine
# google sheets setup
scope = ['https://spreadsheets.google.com/feeds']
creds = ServiceAccountCredentials.from_json_keyfile_name('client_secret_gsheets.json', scope)
client = gspread.authorize(creds)
# google sheets document
sheet = client.open_by_url(URL)
# individual worksheets of google sheets document
key_worksheet = sheet.worksheet('Key')
teams_worksheet = sheet.worksheet('Teams')
sample_team_worksheet = sheet.worksheet('Sample Team')
schedule_worksheet = sheet.worksheet('Schedule')
team_data_worksheet = sheet.worksheet('Team Data')
# setting event key to value in A1 of Key worksheet
event_key = key_worksheet.cell(1, 1).value
# 2537 cell format
format_2537 = CellFormat(backgroundColor=Color(.148, .98, .216)) # 25fa37 converted to rgb out of 1
# tba setup
tba_session = requests.Session()
BASE_URL = 'https://www.thebluealliance.com/api/v3'
# tba credentials setup
with open('client_secret_tba.json') as json_file:
data = json.load(json_file)
tba_auth_key = data['tba_auth_key']
def __init__(self):
"""All TBA requests will have authentication key in header"""
self.tba_session.headers.update({'X-TBA-Auth-Key': self.tba_auth_key})
def get_teams_from_event(self, event):
"""Returns all team keys from event in a list
event: event key of intended competition (e.g. 2018vahay)
"""
teams_raw = self.tba_session.get(self.BASE_URL + '/event/%s/teams/keys' % event).json()
teams = []
for team_raw in teams_raw:
teams.append(team_raw[3:])
return teams
def fill_teams(self, sheet, event):
"""Fills first column of specified sheet with all teams from specified sheet
sheet: intended google sheet
event: event key of intended competition (e.g. 2018vahay)
"""
column = []
for team in self.get_teams_from_event(event):
column.append(team)
for index in range(0, len(column)):
sheet.update_cell(index + 1, 1, column[index])
def create_team_sheets(self):
"""Creates a scouting sheet for each team in competition
event: event key of intended competition (e.g. 2018vahay)
"""
teams = self.teams_worksheet.col_values(1)
for team in teams:
self.sheet.add_worksheet(team, self.sample_team_worksheet.row_count, self.sample_team_worksheet.col_count)
def delete_team_sheets(self):
"""Deletes all individual team worksheets
Used for testing
"""
teams = self.teams_worksheet.col_values(1)
for team in teams:
self.sheet.del_worksheet(self.sheet.worksheet(team))
def get_sample_sheet(self):
"""Returns the sample team sheet in 2D list format [row][column]"""
sample_sheet = []
for row in range(1, self.sample_team_worksheet.row_count + 1):
sample_sheet.append(self.sample_team_worksheet.row_values(row, value_render_option='FORMULA'))
return sample_sheet
def copy_sheet(self, copy_from, copy_to, team_num):
"""Copies every element from a list of values to a specified sheet
copy_from: list from which values are copied
copy_to: sheet to which values are copied
"""
i, j = 1, 1
for row in copy_from:
for col in row:
if col == 'Team #':
copy_to.update_cell(i, j, team_num)
sleep(1.01)
elif col != '':
copy_to.update_cell(i, j, col)
sleep(1.01) # Quota is 100 requests per 100s, this does 100 requests per 101s
j += 1
i += 1
j = 1
def copy_sample_to_team_sheets(self):
"""Copies sample sheet format to every team sheet"""
sample_sheet = self.get_sample_sheet()
for team in self.teams_worksheet.col_values(1):
self.copy_sheet(sample_sheet, self.sheet.worksheet(team), team)
def get_color_schedule(self, event, color):
"""Returns match schedule of specified color alliance in list
event: event key of intended competition (e.g. 2018vahay)
color: color of desired alliance schedule (e.g. red or blue)
"""
# event schedules get updated to elims event schedules once elims are reached
# only elims schedule accessible in finished events
schedule = []
event_list = self.tba_session.get(self.BASE_URL + '/event/%s/matches/simple' % event).json() # list of dicts
for match in event_list:
schedule.append(match['alliances'][color]['team_keys'])
for alliance in schedule:
for i in range(len(alliance)):
alliance[i] = alliance[i][3:]
# trims 'frc' from beginning of every team number
return schedule
def fill_schedule(self, event):
"""Auto fills Schedule worksheet with schedule
event: event key of intended competition (e.g. 2018vahay)
"""
red_schedule = self.get_color_schedule(event, 'red')
blue_schedule = self.get_color_schedule(event, 'blue')
# updates num_matches to the correct number of matches and fill column 1 of spreadsheet with match number
num_matches = 1
for match in range(len(red_schedule)):
self.schedule_worksheet.update_cell(match + 1, 1, match + 1)
num_matches += 1
sleep(1.01)
for i in range(num_matches):
for j in range(3):
self.schedule_worksheet.update_cell(i + 1, j + 2, red_schedule[i][j])
sleep(1.01)
self.schedule_worksheet.update_cell(i + 1, j + 5, blue_schedule[i][j])
sleep(1.01)
def get_team_metrics_from_event(self, event):
"""Returns OPRs, DPRs, and CCWMs of all teams at event in dictionary of dictionaries
event: event key of intended competition (e.g. 2018vahay)
"""
return self.tba_session.get(self.BASE_URL + '/event/%s/oprs' % event).json()
def fill_team_data(self, event):
"""Auto fills Team Data worksheet with teams and their corresponding OPR, DPR, and CCWM
event: event key of intended competition (e.g. 2018vahay)
"""
teams = self.get_teams_from_event(event)
metrics = self.get_team_metrics_from_event(event)
row = 2
team_col, opr_col, dpr_col, ccwm_col = 1, 2, 3, 4
for team in teams:
self.team_data_worksheet.update_cell(row, team_col, team)
sleep(1.01)
self.team_data_worksheet.update_cell(row, opr_col, metrics['oprs']['frc' + team])
sleep(1.01)
self.team_data_worksheet.update_cell(row, dpr_col, metrics['dprs']['frc' + team])
sleep(1.01)
self.team_data_worksheet.update_cell(row, ccwm_col, metrics['ccwms']['frc' + team])
sleep(1.01)
row += 1
def get_predictions_from_event(self, event):
return self.tba_session.get(self.BASE_URL + '/event/%s/predictions' % event).json()
def format_cells_in_schedule(self):
cells_2537_raw = self.schedule_worksheet.findall('2537')
cells_2537 = []
for cell in cells_2537_raw:
cells_2537.append([cell.col + 64, cell.row]) # add 64 to column to match ascii character decimals
for cell in cells_2537:
b = bytes(str(cell[0]), 'utf8')
ascii_char = b.decode('ascii')
cell[0] = chr(int(ascii_char))
for i in range(len(cells_2537)):
format_cell_range(self.schedule_worksheet, '%s%i:%s%i' % (cells_2537[i][0], cells_2537[i][1], cells_2537[i][0], cells_2537[i][1]), self.format_2537)
def main(self):
self.fill_teams(self.teams_worksheet, self.event_key)
self.create_team_sheets()
# self.delete_team_sheets()
# print(self.get_sample_sheet())
# self.copy_sheet(self.get_sample_sheet(), self.sheet.worksheet('1086'), 1086) # testing on single sheet
# print(len(self.get_sample_sheet()))
self.copy_sample_to_team_sheets()
# print(self.get_color_schedule(self.event_key, 'red'))
self.fill_schedule(self.event_key)
self.fill_team_data(self.event_key)
# print(self.get_team_metrics_from_event(self.event_key))
# print(self.get_predictions_from_event(self.event_key))
self.format_cells_in_schedule()
if __name__ == '__main__':
spreadsheet = Spreadsheet()
spreadsheet.main()
| [
[
[
7,
11
],
[
2449,
2453
]
],
[
[
29,
34
],
[
4965,
4970
],
[
5080,
5085
],
[
6977,
6982
],
[
7159,
7164
],
[
7274,
7279
],
[
8101,
8106
],
[
8219,
8224
],
[
8337,
8342
],
[
8457,
8462
]
],
[
[
43,
50
],
[
1602,
1609
]
],
[
[
58,
66
],
[
2276,
2284
]
],
[
[
98,
99
],
[
2155,
2165
],
[
2182,
2187
],
[
9123,
9140
]
],
[
[
141,
166
],
[
1503,
1528
]
],
[
[
175,
186
],
[
10037,
10048
]
],
[
[
10023,
10034
],
[
10055,
10066
]
]
] |
import logging
import os
import numpy as np
import torch
from torch.utils.data import Dataset, DataLoader
import torchvision.transforms as transforms
from torch.utils.data.distributed import DistributedSampler
from .dataset import CheXpert
def _get_mean_and_std(dataset: Dataset):
"""Compute the mean and std of dataset."""
data_loader = DataLoader(dataset, batch_size=1, shuffle=False)
mean = torch.zeros(3)
std = torch.zeros(3)
for i, (img, _) in enumerate(data_loader):
if i % 1000 == 0:
print(i)
mean += img.mean(dim=(0, 2, 3))
std += img.std(dim=(0, 2, 3))
mean /= len(data_loader)
std /= len(data_loader)
return mean, std
class Cutout(object):
def __init__(self, length):
self.length = length
def __call__(self, img):
h, w = img.size(1), img.size(2)
mask = np.ones((h, w), np.float32)
y = np.random.randint(h)
x = np.random.randint(w)
y1 = np.clip(y - self.length // 2, 0, h)
y2 = np.clip(y + self.length // 2, 0, h)
x1 = np.clip(x - self.length // 2, 0, w)
x2 = np.clip(x + self.length // 2, 0, w)
mask[y1:y2, x1:x2] = 0.0
mask = torch.from_numpy(mask)
mask = mask.expand_as(img)
img *= mask
return img
def _data_transforms_chexpert():
CHEXPERT_MEAN = [0.503, 0.503, 0.503]
CHEXPERT_STD = [0.291, 0.291, 0.291]
image_size = 256
train_transform = transforms.Compose(
[
# transforms.ToPILImage(),
transforms.RandomResizedCrop(image_size),
# transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
transforms.Normalize(CHEXPERT_MEAN, CHEXPERT_STD),
]
)
# train_transform.transforms.append(Cutout(16))
test_transform = transforms.Compose(
[
transforms.CenterCrop(image_size),
transforms.ToTensor(),
transforms.Normalize(CHEXPERT_MEAN, CHEXPERT_STD),
]
)
return train_transform, test_transform
# for centralized training
def get_dataloader(dataset, datadir, train_bs, test_bs, dataidxs=None, policy="zeros"):
return get_dataloader_chexpert(datadir, train_bs, test_bs, dataidxs, policy=policy)
# for local devices
def get_dataloader_test(dataset, datadir, train_bs, test_bs, dataidxs_train, dataidxs_test, policy="zeros"):
return get_dataloader_test_chexpert(datadir, train_bs, test_bs, dataidxs_train, dataidxs_test, policy=policy)
def get_dataloader_chexpert(datadir, train_bs, test_bs, dataidxs=None, policy="zeros"):
dl_obj = CheXpert
transform_train, transform_test = _data_transforms_chexpert()
train_ds = dl_obj(
datadir,
dataidxs=dataidxs,
train=True,
transform=transform_train,
download=False,
policy=policy,
)
test_ds = dl_obj(
datadir,
dataidxs=None,
train=False,
transform=transform_test,
download=False,
policy=policy,
)
train_dl = DataLoader(
dataset=train_ds,
batch_size=train_bs,
shuffle=True,
drop_last=False,
pin_memory=True,
num_workers=4,
)
test_dl = DataLoader(
dataset=test_ds,
batch_size=test_bs,
shuffle=False,
drop_last=False,
pin_memory=True,
num_workers=4,
)
return train_dl, test_dl
def get_dataloader_test_chexpert(datadir, train_bs, test_bs, dataidxs_train=None, dataidxs_test=None, policy="zeros"):
dl_obj = CheXpert
transform_train, transform_test = _data_transforms_chexpert()
train_ds = dl_obj(
datadir,
dataidxs=dataidxs_train,
train=True,
transform=transform_train,
download=True,
policy=policy,
)
test_ds = dl_obj(
datadir,
dataidxs=dataidxs_test,
train=False,
transform=transform_test,
download=True,
policy=policy,
)
train_dl = DataLoader(
dataset=train_ds,
batch_size=train_bs,
shuffle=True,
drop_last=False,
pin_memory=True,
num_workers=4,
)
test_dl = DataLoader(
dataset=test_ds,
batch_size=test_bs,
shuffle=False,
drop_last=False,
pin_memory=True,
num_workers=4,
)
return train_dl, test_dl
def distributed_centralized_chexpert_loader(dataset, data_dir, world_size, rank, batch_size):
"""
Used for generating distributed dataloader for
accelerating centralized training
"""
train_bs = batch_size
test_bs = batch_size
transform_train, transform_test = _data_transforms_chexpert()
train_dataset = CheXpert(data_dir=data_dir, dataidxs=None, train=True, transform=transform_train)
test_dataset = CheXpert(data_dir=data_dir, dataidxs=None, train=False, transform=transform_test)
train_sam = DistributedSampler(train_dataset, num_replicas=world_size, rank=rank)
test_sam = DistributedSampler(test_dataset, num_replicas=world_size, rank=rank)
train_dl = DataLoader(
train_dataset,
batch_size=train_bs,
sampler=train_sam,
pin_memory=True,
num_workers=4,
)
test_dl = DataLoader(
test_dataset,
batch_size=test_bs,
sampler=test_sam,
pin_memory=True,
num_workers=4,
)
class_num = 1000
train_data_num = len(train_dataset)
test_data_num = len(test_dataset)
return train_data_num, test_data_num, train_dl, test_dl, None, None, None, class_num
def load_partition_data_chexpert(
data_dir,
partition_method="random",
partition_alpha=None,
client_number=100,
batch_size=10,
policy="zeros",
):
transform_train, transform_test = _data_transforms_chexpert()
train_dataset = CheXpert(
data_dir=data_dir,
dataidxs=None,
train=True,
transform=transform_train,
policy=policy,
)
test_dataset = CheXpert(data_dir=data_dir, dataidxs=None, train=False, transform=transform_test, policy=policy)
# get local dataset
if partition_method == "random":
num_train_items = int(len(train_dataset) / client_number)
num_test_items = int(len(test_dataset) / client_number)
dict_client = {}
all_train_idxs = list(range(len(train_dataset)))
all_test_idxs = list(range(len(test_dataset)))
for client_idx in range(client_number):
dict_client[client_idx] = {}
dict_client[client_idx]["train"] = set(np.random.choice(all_train_idxs, num_train_items, replace=False))
dict_client[client_idx]["test"] = set(np.random.choice(all_test_idxs, num_test_items, replace=False))
all_train_idxs = list(set(all_train_idxs) - dict_client[client_idx]["train"])
all_test_idxs = list(set(all_test_idxs) - dict_client[client_idx]["test"])
if len(all_train_idxs) > 0:
all_client_idxs = list(range(client_number))
np.random.shuffle(all_client_idxs)
choiced_client_idxs = all_client_idxs[: len(all_train_idxs)]
for idx, client_idx in enumerate(choiced_client_idxs):
dict_client[client_idx]["train"].add(all_train_idxs[idx])
if len(all_test_idxs) > 0:
all_client_idxs = list(range(client_number))
np.random.shuffle(all_client_idxs)
choiced_client_idxs = all_client_idxs[: len(all_test_idxs)]
for idx, client_idx in enumerate(choiced_client_idxs):
dict_client[client_idx]["test"].add(all_test_idxs[idx])
else:
raise NotImplementedError
# build dataloader
train_dl = []
test_dl = []
for client_idx in range(client_number):
train_data_idxs = list(dict_client[client_idx]["train"])
test_data_idxs = list(dict_client[client_idx]["test"])
train_dl_, test_dl_ = get_dataloader_test_chexpert(
datadir=data_dir,
dataidxs_train=train_data_idxs,
dataidxs_test=test_data_idxs,
train_bs=batch_size,
test_bs=batch_size,
policy=policy,
)
train_dl.append(train_dl_)
test_dl.append(test_dl_)
logging.info(f"Client {client_idx} train data num: {len(train_dl_)} test data num: {len(test_dl_)}")
logging.info("Partition data done")
# logging.info("Partition data for each client: {}".format(dict_client))
train_data_num = len(train_dataset)
test_data_num = len(test_dataset)
train_data_global = train_dataset
test_data_global = test_dataset
data_local_num_dict = {
client_idx: len(dict_client[client_idx]["train"]) + len(dict_client[client_idx]["test"])
for client_idx in range(client_number)
}
train_data_local_dict = {client_idx: train_dl_ for client_idx, train_dl_ in enumerate(train_dl)}
test_data_local_dict = {client_idx: test_dl_ for client_idx, test_dl_ in enumerate(test_dl)}
class_num = train_dataset.num_classes
return (
train_data_num,
test_data_num,
train_data_global,
test_data_global,
data_local_num_dict,
train_data_local_dict,
test_data_local_dict,
class_num,
)
if __name__ == "__main__":
data_path = os.path.join("D:\\", "dataset", "CheXpert", "CheXpert-v1.0-small")
data = CheXpert(data_dir=data_path, transform=transforms.ToTensor())
print(len(data))
print(data[0][0])
print(data[0][1])
# mean, std = _get_mean_and_std(data)
# print(mean, std)
# train_transform, valid_transform = _data_transforms_chexpert()
# print(train_transform)
# print(valid_transform)
(
train_data_num,
test_data_num,
train_data_global,
test_data_global,
data_local_num_dict,
train_data_local_dict,
test_data_local_dict,
class_num,
) = load_partition_data_chexpert(data_dir=data_path, client_number=10, batch_size=10, policy="zeros")
print(train_data_num, test_data_num, class_num)
| [
[
[
7,
14
],
[
8291,
8298
],
[
8397,
8404
]
],
[
[
23,
25
],
[
9355,
9357
]
],
[
[
33,
44
],
[
870,
872
],
[
886,
888
],
[
910,
912
],
[
943,
945
],
[
978,
980
],
[
1027,
1029
],
[
1076,
1078
],
[
1125,
1127
],
[
6600,
6602
],
[
6716,
6718
],
[
7062,
7064
],
[
7415,
7417
]
],
[
[
52,
57
],
[
410,
415
],
[
435,
440
],
[
1210,
1215
]
],
[
[
87,
94
],
[
275,
282
]
],
[
[
96,
106
],
[
350,
360
],
[
3057,
3067
],
[
3239,
3249
],
[
4021,
4031
],
[
4203,
4213
]
],
[
[
114,
150
],
[
9472,
9482
],
[
1470,
1480
],
[
1551,
1561
],
[
1654,
1664
],
[
1689,
1699
],
[
1831,
1841
],
[
1873,
1883
],
[
1920,
1930
],
[
1955,
1965
]
],
[
[
192,
210
],
[
4940,
4958
],
[
5025,
5043
]
],
[
[
233,
241
],
[
9433,
9441
],
[
2619,
2627
],
[
3570,
3578
],
[
4740,
4748
],
[
4841,
4849
],
[
5871,
5879
],
[
6034,
6042
]
],
[
[
248,
265
]
],
[
[
708,
714
]
],
[
[
1313,
1338
],
[
2667,
2692
],
[
3618,
3643
],
[
4692,
4717
],
[
5822,
5847
]
],
[
[
2099,
2113
]
],
[
[
2297,
2316
]
],
[
[
2522,
2545
],
[
2194,
2217
]
],
[
[
3442,
3470
],
[
2413,
2441
],
[
7966,
7994
]
],
[
[
4406,
4445
]
],
[
[
5618,
5646
],
[
9978,
10006
]
],
[
[
9343,
9352
],
[
9451,
9460
],
[
10016,
10025
]
],
[
[
9426,
9430
],
[
9509,
9513
],
[
9526,
9530
],
[
9548,
9552
],
[
5110,
5114
],
[
5274,
5278
]
],
[
[
9769,
9783
],
[
10087,
10101
]
],
[
[
9793,
9806
],
[
10103,
10116
]
],
[
[
9816,
9833
]
],
[
[
9843,
9859
]
],
[
[
9869,
9888
]
],
[
[
9898,
9919
]
],
[
[
9929,
9949
]
],
[
[
9959,
9968
],
[
10118,
10127
]
]
] |
import json
from collections import Iterable
from pathlib import Path
import cadquery as cq
import matplotlib.pyplot as plt
import plotly.graph_objects as go
from cadquery import exporters
import paramak
from paramak.neutronics_utils import (add_stl_to_moab_core,
define_moab_core_and_tags)
from paramak.utils import get_hash
class Reactor:
"""The Reactor object allows shapes and components to be added and then
collective operations to be performed on them. Combining all the shapes is
required for creating images of the whole reactor and creating a Graveyard
(bounding box) that is needed for neutronics simulations.
Args:
shapes_and_components (list): list of paramak.Shape
"""
def __init__(self, shapes_and_components):
self.material_tags = []
self.stp_filenames = []
self.stl_filenames = []
self.tet_meshes = []
self.graveyard = None
self.solid = None
self.shapes_and_components = shapes_and_components
self.reactor_hash_value = None
self.graveyard_offset = None # set by the make_graveyard method
@property
def stp_filenames(self):
values = []
for shape_or_component in self.shapes_and_components:
values.append(shape_or_component.stp_filename)
return values
@stp_filenames.setter
def stp_filenames(self, value):
self._stp_filenames = value
@property
def stl_filenames(self):
values = []
for shape_or_component in self.shapes_and_components:
values.append(shape_or_component.stl_filename)
return values
@stl_filenames.setter
def stl_filenames(self, value):
self._stl_filenames = value
@property
def largest_dimension(self):
"""Calculates a bounding box for the Reactor and returns the largest
absolute value of the largest dimension of the bounding box"""
largest_dimension = 0
for component in self.shapes_and_components:
largest_dimension = max(
largest_dimension,
component.largest_dimension)
self._largest_dimension = largest_dimension
return largest_dimension
@largest_dimension.setter
def largest_dimension(self, value):
self._largest_dimension = value
@property
def material_tags(self):
"""Returns a set of all the materials_tags used in the Reactor
(excluding the plasma)"""
values = []
for shape_or_component in self.shapes_and_components:
if isinstance(
shape_or_component,
(paramak.Plasma,
paramak.PlasmaFromPoints,
paramak.PlasmaBoundaries)) is False:
values.append(shape_or_component.material_tag)
return values
@material_tags.setter
def material_tags(self, value):
self._material_tags = value
@property
def tet_meshes(self):
values = []
for shape_or_componet in self.shapes_and_components:
values.append(shape_or_componet.tet_mesh)
return values
@tet_meshes.setter
def tet_meshes(self, value):
self._tet_meshes = value
@property
def shapes_and_components(self):
"""Adds a list of parametric shape(s) and or parametric component(s)
to the Reactor object. This allows collective operations to be
performed on all the shapes in the reactor. When adding a shape or
component the stp_filename of the shape or component should be unique"""
if hasattr(self, "create_solids"):
ignored_keys = ["reactor_hash_value"]
if get_hash(self, ignored_keys) != self.reactor_hash_value:
self.create_solids()
self.reactor_hash_value = get_hash(self, ignored_keys)
return self._shapes_and_components
@shapes_and_components.setter
def shapes_and_components(self, value):
if not isinstance(value, Iterable):
raise ValueError("shapes_and_components must be a list")
self._shapes_and_components = value
@property
def graveyard_offset(self):
return self._graveyard_offset
@graveyard_offset.setter
def graveyard_offset(self, value):
if value is None:
self._graveyard_offset = None
elif not isinstance(value, (float, int)):
raise ValueError("graveyard_offset must be a number")
elif value < 0:
raise ValueError("graveyard_offset must be positive")
self._graveyard_offset = value
@property
def solid(self):
"""This combines all the parametric shapes and compents in the reactor
object and rotates the viewing angle so that .solid operations in
jupyter notebook.
"""
list_of_cq_vals = []
for shape_or_compound in self.shapes_and_components:
if isinstance(
shape_or_compound.solid,
cq.occ_impl.shapes.Compound):
for solid in shape_or_compound.solid.Solids():
list_of_cq_vals.append(solid)
else:
list_of_cq_vals.append(shape_or_compound.solid.val())
compound = cq.Compound.makeCompound(list_of_cq_vals)
compound = compound.rotate(
startVector=(0, 1, 0), endVector=(0, 0, 1), angleDegrees=180
)
return compound
@solid.setter
def solid(self, value):
self._solid = value
def neutronics_description(self, include_plasma=False,
include_graveyard=True
):
"""A description of the reactor containing material tags, stp filenames,
and tet mesh instructions. This is used for neutronics simulations which
require linkage between volumes, materials and identification of which
volumes to tet mesh. The plasma geometry is not included by default as
it is typically not included in neutronics simulations. The reason for
this is that the low number density results in minimal interaction with
neutrons. However, it can be added if the include_plasma argument is set
to True.
Returns:
dictionary: a dictionary of materials and filenames for the reactor
"""
neutronics_description = []
for entry in self.shapes_and_components:
if include_plasma is False and isinstance(
entry,
(paramak.Plasma,
paramak.PlasmaFromPoints,
paramak.PlasmaBoundaries)) is True:
continue
if entry.stp_filename is None:
raise ValueError(
"Set Shape.stp_filename for all the \
Reactor entries before using this method"
)
if entry.material_tag is None:
raise ValueError(
"set Shape.material_tag for all the \
Reactor entries before using this method"
)
neutronics_description.append(entry.neutronics_description())
# This add the neutronics description for the graveyard which is unique
# as it is automatically calculated instead of being added by the user.
# Also the graveyard must have 'Graveyard' as the material name
if include_graveyard is True:
self.make_graveyard()
neutronics_description.append(
self.graveyard.neutronics_description())
return neutronics_description
def export_neutronics_description(
self,
filename="manifest.json",
include_plasma=False,
include_graveyard=True):
"""
Saves Reactor.neutronics_description to a json file. The resulting json
file contains a list of dictionaries. Each dictionary entry comprises
of a material and a filename and optionally a tet_mesh instruction. The
json file can then be used with the neutronics workflows to create a
neutronics model. Creating of the neutronics model requires linkage
between volumes, materials and identification of which volumes to
tet_mesh. If the filename does not end with .json then .json will be
added. The plasma geometry is not included by default as it is
typically not included in neutronics simulations. The reason for this
is that the low number density results in minimal interactions with
neutrons. However, the plasma can be added if the include_plasma
argument is set to True.
Args:
filename (str, optional): the filename used to save the neutronics
description
include_plasma (Boolean, optional): should the plasma be included.
Defaults to False as the plasma volume and material has very
little impact on the neutronics results due to the low density.
Including the plasma does however slow down the simulation.
include_graveyard (Boolean, optional): should the graveyard be
included. Defaults to True as this is needed for DAGMC models.
"""
path_filename = Path(filename)
if path_filename.suffix != ".json":
path_filename = path_filename.with_suffix(".json")
path_filename.parents[0].mkdir(parents=True, exist_ok=True)
with open(path_filename, "w") as outfile:
json.dump(
self.neutronics_description(
include_plasma=include_plasma,
include_graveyard=include_graveyard,
),
outfile,
indent=4,
)
print("saved geometry description to ", path_filename)
return str(path_filename)
def export_stp(self, output_folder="", graveyard_offset=100,
mode='solid'):
"""Writes stp files (CAD geometry) for each Shape object in the reactor
and the graveyard.
Args:
output_folder (str): the folder for saving the stp files to
graveyard_offset (float, optional): the offset between the largest
edge of the geometry and inner bounding shell created. Defaults
to 100.
mode (str, optional): the object to export can be either
'solid' which exports 3D solid shapes or the 'wire' which
exports the wire edges of the shape. Defaults to 'solid'.
Returns:
list: a list of stp filenames created
"""
if len(self.stp_filenames) != len(set(self.stp_filenames)):
raise ValueError(
"Set Reactor already contains a shape or component \
with this stp_filename",
self.stp_filenames,
)
filenames = []
for entry in self.shapes_and_components:
if entry.stp_filename is None:
raise ValueError(
"set .stp_filename property for \
Shapes before using the export_stp method"
)
filenames.append(
str(Path(output_folder) / Path(entry.stp_filename)))
entry.export_stp(
filename=Path(output_folder) / Path(entry.stp_filename),
mode=mode
)
# creates a graveyard (bounding shell volume) which is needed for
# neutronics simulations
self.make_graveyard(graveyard_offset=graveyard_offset)
filenames.append(
str(Path(output_folder) / Path(self.graveyard.stp_filename)))
self.graveyard.export_stp(
Path(output_folder) / Path(self.graveyard.stp_filename)
)
return filenames
def export_stl(self, output_folder="", tolerance=0.001):
"""Writes stl files (CAD geometry) for each Shape object in the reactor
:param output_folder: the folder for saving the stp files to
:type output_folder: str
:param tolerance: the precision of the faceting
:type tolerance: float
:return: a list of stl filenames created
:rtype: list
"""
if len(self.stl_filenames) != len(set(self.stl_filenames)):
raise ValueError(
"Set Reactor already contains a shape or component \
with this stl_filename",
self.stl_filenames,
)
filenames = []
for entry in self.shapes_and_components:
print("entry.stl_filename", entry.stl_filename)
if entry.stl_filename is None:
raise ValueError(
"set .stl_filename property for \
Shapes before using the export_stl method"
)
filenames.append(
str(Path(output_folder) / Path(entry.stl_filename)))
entry.export_stl(
Path(output_folder) /
Path(
entry.stl_filename),
tolerance)
# creates a graveyard (bounding shell volume) which is needed for
# neutronics simulations
self.make_graveyard()
filenames.append(
str(Path(output_folder) / Path(self.graveyard.stl_filename)))
self.graveyard.export_stl(
Path(output_folder) / Path(self.graveyard.stl_filename)
)
print("exported stl files ", filenames)
return filenames
def export_h5m(
self,
filename='dagmc.h5m',
skip_graveyard=False,
tolerance=0.001,
graveyard_offset=100):
"""Converts stl files into DAGMC compatible h5m file using PyMOAB. The
DAGMC file produced has not been imprinted and merged unlike the other
supported method which uses Trelis to produce an imprinted and merged
DAGMC geometry. If the provided filename doesn't end with .h5m it will
be added
Args:
filename (str, optional): filename of h5m outputfile
Defaults to "dagmc.h5m".
skip_graveyard (boolean, optional): filename of h5m outputfile
Defaults to False.
tolerance (float, optional): the precision of the faceting
Defaults to 0.001.
graveyard_offset (float, optional): the offset between the largest
edge of the geometry and inner bounding shell created. Defaults
to 100.
Returns:
filename: output h5m filename
"""
path_filename = Path(filename)
if path_filename.suffix != ".h5m":
path_filename = path_filename.with_suffix(".h5m")
path_filename.parents[0].mkdir(parents=True, exist_ok=True)
moab_core, moab_tags = define_moab_core_and_tags()
surface_id = 1
volume_id = 1
for item in self.shapes_and_components:
item.export_stl(item.stl_filename, tolerance=tolerance)
moab_core = add_stl_to_moab_core(
moab_core,
surface_id,
volume_id,
item.material_tag,
moab_tags,
item.stl_filename)
volume_id += 1
surface_id += 1
if skip_graveyard is False:
self.make_graveyard(graveyard_offset=graveyard_offset)
self.graveyard.export_stl(self.graveyard.stl_filename)
volume_id = 2
surface_id = 2
moab_core = add_stl_to_moab_core(
moab_core,
surface_id,
volume_id,
self.graveyard.material_tag,
moab_tags,
self.graveyard.stl_filename
)
all_sets = moab_core.get_entities_by_handle(0)
file_set = moab_core.create_meshset()
moab_core.add_entities(file_set, all_sets)
moab_core.write_file(str(path_filename))
return filename
def export_physical_groups(self, output_folder=""):
"""Exports several JSON files containing a look up table which is
useful for identifying faces and volumes. The output file names are
generated from .stp_filename properties.
Args:
output_folder (str, optional): directory of output files.
Defaults to "".
Raises:
ValueError: if one .stp_filename property is set to None
Returns:
list: list of output file names
"""
filenames = []
for entry in self.shapes_and_components:
if entry.stp_filename is None:
raise ValueError(
"set .stp_filename property for \
Shapes before using the export_physical_groups method"
)
filenames.append(
str(Path(output_folder) / Path(entry.stp_filename)))
entry.export_physical_groups(
Path(output_folder) / Path(entry.stp_filename))
return filenames
def export_svg(self, filename):
"""Exports an svg file for the Reactor.solid. If the filename provided
doesn't end with .svg it will be added.
Args:
filename (str): the filename of the svg file to be exported
"""
path_filename = Path(filename)
if path_filename.suffix != ".svg":
path_filename = path_filename.with_suffix(".svg")
path_filename.parents[0].mkdir(parents=True, exist_ok=True)
with open(path_filename, "w") as out_file:
exporters.exportShape(self.solid, "SVG", out_file)
print("Saved file as ", path_filename)
def export_graveyard(
self,
graveyard_offset=100,
filename="Graveyard.stp"):
"""Writes an stp file (CAD geometry) for the reactor graveyard. This
is needed for DAGMC simulations. This method also calls
Reactor.make_graveyard with the offset.
Args:
filename (str): the filename for saving the stp file
graveyard_offset (float): the offset between the largest edge of
the geometry and inner bounding shell created. Defaults to
Reactor.graveyard_offset
Returns:
str: the stp filename created
"""
self.make_graveyard(graveyard_offset=graveyard_offset)
self.graveyard.export_stp(Path(filename))
return filename
def make_graveyard(self, graveyard_offset=100):
"""Creates a graveyard volume (bounding box) that encapsulates all
volumes. This is required by DAGMC when performing neutronics
simulations.
Args:
graveyard_offset (float): the offset between the largest edge of
the geometry and inner bounding shell created. Defaults to
Reactor.graveyard_offset
Returns:
CadQuery solid: a shell volume that bounds the geometry, referred
to as a graveyard in DAGMC
"""
self.graveyard_offset = graveyard_offset
for component in self.shapes_and_components:
if component.solid is None:
component.create_solid()
graveyard_shape = paramak.HollowCube(
length=self.largest_dimension * 2 + graveyard_offset * 2,
name="Graveyard",
material_tag="Graveyard",
stp_filename="Graveyard.stp",
stl_filename="Graveyard.stl",
)
self.graveyard = graveyard_shape
return graveyard_shape
def export_2d_image(
self,
filename="2d_slice.png",
xmin=0.0,
xmax=900.0,
ymin=-600.0,
ymax=600.0):
"""Creates a 2D slice image (png) of the reactor.
Args:
filename (str): output filename of the image created
Returns:
str: png filename created
"""
path_filename = Path(filename)
if path_filename.suffix != ".png":
path_filename = path_filename.with_suffix(".png")
path_filename.parents[0].mkdir(parents=True, exist_ok=True)
fig, ax = plt.subplots()
# creates individual patches for each Shape which are combined together
for entry in self.shapes_and_components:
patch = entry._create_patch()
ax.add_collection(patch)
ax.axis("equal")
ax.set(xlim=(xmin, xmax), ylim=(ymin, ymax))
ax.set_aspect("equal", "box")
Path(filename).parent.mkdir(parents=True, exist_ok=True)
plt.savefig(filename, dpi=100)
plt.close()
print("\n saved 2d image to ", str(path_filename))
return str(path_filename)
def export_html(self, filename="reactor.html"):
"""Creates a html graph representation of the points for the Shape
objects that make up the reactor. Note: if the filename provided doesn't end
with .html then it will be appended.
Args:
filename (str): the filename to save the html graph
Returns:
plotly figure: figure object
"""
path_filename = Path(filename)
if path_filename.suffix != ".html":
path_filename = path_filename.with_suffix(".html")
path_filename.parents[0].mkdir(parents=True, exist_ok=True)
fig = go.Figure()
fig.update_layout(
{"title": "coordinates of components", "hovermode": "closest"}
)
# accesses the Shape traces for each Shape and adds them to the figure
for entry in self.shapes_and_components:
fig.add_trace(entry._trace())
fig.write_html(str(path_filename))
print("Exported html graph to ", str(path_filename))
return fig
| [
[
[
8,
12
],
[
9626,
9630
]
],
[
[
37,
45
],
[
4046,
4054
]
],
[
[
66,
70
],
[
9371,
9375
],
[
11362,
11366
],
[
11384,
11388
],
[
11466,
11470
],
[
11488,
11492
],
[
11767,
11771
],
[
11789,
11793
],
[
11872,
11876
],
[
11894,
11898
],
[
13056,
13060
],
[
13078,
13082
],
[
13151,
13155
],
[
13189,
13193
],
[
13443,
13447
],
[
13465,
13469
],
[
13548,
13552
],
[
13570,
13574
],
[
14808,
14812
],
[
17093,
17097
],
[
17115,
17119
],
[
17200,
17204
],
[
17222,
17226
],
[
17561,
17565
],
[
18663,
18667
],
[
20224,
20228
],
[
20782,
20786
],
[
21422,
21426
]
],
[
[
79,
93
],
[
5048,
5050
],
[
5299,
5301
]
],
[
[
101,
125
],
[
20433,
20436
],
[
20847,
20850
],
[
20886,
20889
]
],
[
[
133,
159
],
[
21629,
21631
]
],
[
[
181,
190
],
[
17815,
17824
]
],
[
[
199,
206
],
[
2677,
2684
],
[
2710,
2717
],
[
2753,
2760
],
[
6578,
6585
],
[
6611,
6618
],
[
6654,
6661
],
[
19490,
19497
]
],
[
[
245,
265
],
[
15246,
15266
],
[
15750,
15770
]
],
[
[
305,
330
],
[
15030,
15055
]
],
[
[
358,
366
],
[
3726,
3734
],
[
3862,
3870
]
],
[
[
375,
382
]
]
] |
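To make the calling convention of the Reactor export methods shown above concrete, here is a small, hedged usage sketch. The helper name, the output folder and the file names are assumptions, and the `reactor` argument is expected to be an already-built reactor (the dump above references the paramak package) whose shapes have their .stp_filename / .stl_filename properties set.

def export_reactor_outputs(reactor, cad_dir="cad"):
    """Illustrative only: run the export methods defined above on a built reactor."""
    # CAD exports; both methods return the list of files they wrote
    stp_files = reactor.export_stp(output_folder=cad_dir)
    stl_files = reactor.export_stl(output_folder=cad_dir, tolerance=0.001)

    # DAGMC-compatible geometry for neutronics, including the graveyard volume
    reactor.export_h5m(filename="dagmc.h5m", graveyard_offset=100)

    # quick visual checks of the geometry and of the Shape points
    reactor.export_svg("reactor.svg")
    reactor.export_html("reactor.html")
    return stp_files + stl_files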
#!/usr/bin/python2
"""
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from resource_management import *
def hive_service(
name,
action='start'):
import params
if name == 'metastore':
pid_file = format("{hive_pid_dir}/{hive_metastore_pid}")
cmd = format(
"env HADOOP_HOME={hadoop_home} JAVA_HOME={java64_home} {start_metastore_path} {hive_log_dir}/hive.out {hive_log_dir}/hive.err {pid_file} {hive_server_conf_dir}")
elif name == 'hiveserver2':
pid_file = format("{hive_pid_dir}/{hive_pid}")
cmd = format(
"env JAVA_HOME={java64_home} {start_hiveserver2_path} {hive_log_dir}/hive-server2.out {hive_log_dir}/hive-server2.err {pid_file} {hive_server_conf_dir}")
if action == 'start':
demon_cmd = format("{cmd}")
no_op_test = format("ls {pid_file} >/dev/null 2>&1 && ps `cat {pid_file}` >/dev/null 2>&1")
Execute(demon_cmd,
user=params.hive_user,
not_if=no_op_test
)
if params.hive_jdbc_driver == "com.mysql.jdbc.Driver" or params.hive_jdbc_driver == "oracle.jdbc.driver.OracleDriver":
db_connection_check_command = format(
"{java64_home}/bin/java -cp {check_db_connection_jar}:/usr/share/java/{jdbc_jar_name} org.apache.ambari.server.DBConnectionVerification {hive_jdbc_connection_url} {hive_metastore_user_name} {hive_metastore_user_passwd} {hive_jdbc_driver}")
Execute(db_connection_check_command,
path='/usr/sbin:/sbin:/usr/local/bin:/bin:/usr/bin')
elif action == 'stop':
demon_cmd = format("kill `cat {pid_file}` >/dev/null 2>&1 && rm -f {pid_file}")
Execute(demon_cmd)
| [
[
[
817,
818
],
[
1577,
1584
],
[
2089,
2096
],
[
2307,
2314
]
],
[
[
825,
837
]
]
] |
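For context, the hive_service function above is meant to be invoked from an Ambari service control script. A minimal, hypothetical invocation looks like the following; the surrounding script and its importable params module are assumed and are not part of the dump.

# hypothetical calls from an Ambari Hive service script
hive_service('metastore', action='start')
hive_service('hiveserver2', action='stop')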
"""
Logging module for printing status during an exploit, and internally
within ``pwntools``.
Exploit Developers
------------------
By using the standard ``from pwn import *``, an object named ``log`` will
be inserted into the global namespace. You can use this to print out
status messages during exploitation.
For example,::
log.info('Hello, world!')
prints::
[*] Hello, world!
Additionally, there are some nifty mechanisms for performing status updates
on a running job (e.g. when brute-forcing).::
p = log.progress('Working')
p.status('Reticulating splines')
time.sleep(1)
p.success('Got a shell!')
The verbosity of logging can be most easily controlled by setting
``log_level`` on the global ``context`` object.::
log.info("No you see me")
context.log_level = 'error'
log.info("Now you don't")
The purpose of this attribute is to control what gets printed to the screen,
not what gets emitted. This means that you can put all logging events into
a log file, while only wanting to see a small subset of them on your screen.
Pwnlib Developers
-----------------
A module-specific logger can be imported into the module via::
from pwnlib.log import getLogger
log = getLogger(__name__)
This provides an easy way to filter logging programmatically
or via a configuration file for debugging.
When using ``progress``, you should use the ``with``
keyword to manage scoping, to ensure the spinner stops if an
exception is thrown.
Technical details
-----------------
Familiarity with the :mod:`logging` module is assumed.
A pwnlib root logger named 'pwnlib' is created and a custom handler and
formatter is installed for it. The handler determines its logging level from
:data:`context.log_level`.
Ideally :data:`context.log_level` should only affect which records will be
emitted by the handler such that e.g. logging to a file will not be changed by
it. But for performance reasons it is not feasible to log everything in the normal
case. In particular there are tight loops inside :mod:`pwnlib.tubes.tube`, which
we would like to be able to debug, but if we are not debugging them, they should
not spit out messages (even to a log file). For this reason there are a few places
inside pwnlib, that will not even emit a record without :data:`context.log_level`
being set to `logging.DEBUG` or below.
Log records created by ``Progress`` and ``Logger`` objects will set
``'pwnlib_msgtype'`` on the ``extra`` field to signal which kind of message was
generated. This information is used by the formatter to prepend a symbol to the
message, e.g. ``'[+] '`` in ``'[+] got a shell!'``
This field is ignored when using the ``logging`` module's standard formatters.
All status updates (which are not dropped due to throttling) on progress loggers
result in a log record being created. The ``extra`` field then carries a
reference to the ``Progress`` logger as ``'pwnlib_progress'``.
If the custom handler determines that :data:`term.term_mode` is enabled, log
records that have a ``'pwnlib_progress'`` in their ``extra`` field will not
result in a message being emitted but rather an animated progress line (with a
spinner!) being created. Note that other handlers will still see a meaningful
log record.
The custom handler will only handle log records with a level of at least
:data:`context.log_level`. Thus if e.g. the level for the
``'pwnlib.tubes.ssh'`` is set to ``'DEBUG'`` no additional output will show up
unless :data:`context.log_level` is also set to ``'DEBUG'``. Other handlers
will however see the extra log records generated by the ``'pwnlib.tubes.ssh'``
logger.
"""
from __future__ import absolute_import
from __future__ import division
import logging
import os
import random
import re
import six
import sys
import threading
import time
from pwnlib import term
from pwnlib.config import register_config
from pwnlib.context import Thread
from pwnlib.context import context
from pwnlib.exception import PwnlibException
from pwnlib.term import spinners
from pwnlib.term import text
__all__ = [
'getLogger', 'install_default_handler', 'rootlogger'
]
# list of prefixes to use for the different message types. note that the `text`
# module won't add any escape codes if `pwnlib.context.log_console.isatty()` is `False`
_msgtype_prefixes = {
'status' : [text.magenta, 'x'],
'success' : [text.bold_green, '+'],
'failure' : [text.bold_red, '-'],
'debug' : [text.bold_red, 'DEBUG'],
'info' : [text.bold_blue, '*'],
'warning' : [text.bold_yellow, '!'],
'error' : [text.on_red, 'ERROR'],
'exception' : [text.on_red, 'ERROR'],
'critical' : [text.on_red, 'CRITICAL'],
'info_once' : [text.bold_blue, '*'],
'warning_once' : [text.bold_yellow, '!'],
}
def read_log_config(settings):
log = getLogger(__name__)
for key, value in settings.items():
if '.' not in key:
log.warn("Invalid configuration option %r in section %r" % (key, 'log'))
continue
msgtype, key = key.split('.', 1)
if key == 'color':
current = _msgtype_prefixes[msgtype][0]
_msgtype_prefixes[msgtype][0] = getattr(text, value, current)
elif key == 'symbol':
_msgtype_prefixes[msgtype][1] = value
else:
log.warn("Unknown configuration option %r in section %r" % (key, 'log'))
register_config('log', read_log_config)
# the text decoration to use for spinners. the spinners themselves can be found
# in the `pwnlib.term.spinners` module
_spinner_style = text.bold_blue
class Progress(object):
"""
Progress logger used to generate log records associated with some running
job. Instances can be used as context managers which will automatically
declare the running job a success upon exit or a failure upon a thrown
exception. After :meth:`success` or :meth:`failure` is called the status
can no longer be updated.
This class is intended for internal use. Progress loggers should be created
using :meth:`Logger.progress`.
"""
def __init__(self, logger, msg, status, level, args, kwargs):
self._logger = logger
self._msg = msg
self._status = status
self._level = level
self._stopped = False
self.last_status = 0
self.rate = kwargs.pop('rate', 0)
self._log(status, args, kwargs, 'status')
# it is a common use case to create a logger and then immediately update
# its status line, so we reset `last_status` to accommodate this pattern
self.last_status = 0
def _log(self, status, args, kwargs, msgtype):
# Logs are strings, not bytes. Handle Python3 bytes() objects.
status = six.ensure_text(status)
# this progress logger is stopped, so don't generate any more records
if self._stopped:
return
msg = self._msg
if msg and status:
msg += ': '
msg += status
self._logger._log(self._level, msg, args, kwargs, msgtype, self)
def status(self, status, *args, **kwargs):
"""status(status, *args, **kwargs)
Logs a status update for the running job.
If the progress logger is animated the status line will be updated in
place.
Status updates are throttled at one update per 100ms.
"""
now = time.time()
if (now - self.last_status) > self.rate:
self.last_status = now
self._log(status, args, kwargs, 'status')
def success(self, status = 'Done', *args, **kwargs):
"""success(status = 'Done', *args, **kwargs)
Logs that the running job succeeded. No further status updates are
allowed.
If the Logger is animated, the animation is stopped.
"""
self._log(status, args, kwargs, 'success')
self._stopped = True
def failure(self, status = 'Failed', *args, **kwargs):
"""failure(message)
Logs that the running job failed. No further status updates are
allowed.
If the Logger is animated, the animation is stopped.
"""
self._log(status, args, kwargs, 'failure')
self._stopped = True
def __enter__(self):
return self
def __exit__(self, exc_typ, exc_val, exc_tb):
# if the progress logger is already stopped these are no-ops
if exc_typ is None:
self.success()
else:
self.failure()
class Logger(object):
"""
A class akin to the :class:`logging.LoggerAdapter` class. All public
methods defined on :class:`logging.Logger` instances are defined on this
class.
Also adds some ``pwnlib`` flavor:
* :meth:`progress` (alias :meth:`waitfor`)
* :meth:`success`
* :meth:`failure`
* :meth:`indented`
* :meth:`info_once`
* :meth:`warning_once` (alias :meth:`warn_once`)
Adds ``pwnlib``-specific information for coloring, indentation and progress
logging via log records ``extra`` field.
Loggers instantiated with :func:`getLogger` will be of this class.
"""
_one_time_infos = set()
_one_time_warnings = set()
def __init__(self, logger=None):
if logger is None:
# This is a minor hack to permit user-defined classes which inherit
# from a tube (which do not actually reside in the pwnlib library)
# to receive logging abilities that behave as they would expect from
# the rest of the library
module = self.__module__
if not module.startswith('pwnlib'):
module = 'pwnlib.' + module
# - end hack -
logger_name = '%s.%s.%s' % (module, self.__class__.__name__, id(self))
logger = logging.getLogger(logger_name)
self._logger = logger
def _getlevel(self, levelString):
if isinstance(levelString, six.integer_types):
return levelString
return logging._levelNames[levelString.upper()]
def _log(self, level, msg, args, kwargs, msgtype, progress = None):
# Logs are strings, not bytes. Handle Python3 bytes() objects.
msg = six.ensure_text(msg)
extra = kwargs.get('extra', {})
extra.setdefault('pwnlib_msgtype', msgtype)
extra.setdefault('pwnlib_progress', progress)
kwargs['extra'] = extra
self._logger.log(level, msg, *args, **kwargs)
def progress(self, message, status = '', *args, **kwargs):
"""progress(message, status = '', *args, level = logging.INFO, **kwargs) -> Progress
Creates a new progress logger which creates log records with log level
`level`.
Progress status can be updated using :meth:`Progress.status` and stopped
using :meth:`Progress.success` or :meth:`Progress.failure`.
If `term.term_mode` is enabled the progress logger will be animated.
The progress manager also functions as a context manager. Using context
managers ensures that animations stop even if an exception is raised.
.. code-block:: python
with log.progress('Trying something...') as p:
for i in range(10):
p.status("At %i" % i)
time.sleep(0.5)
x = 1/0
"""
level = self._getlevel(kwargs.pop('level', logging.INFO))
return Progress(self, message, status, level, args, kwargs)
def waitfor(self, *args, **kwargs):
"""Alias for :meth:`progress`."""
return self.progress(*args, **kwargs)
def indented(self, message, *args, **kwargs):
"""indented(message, *args, level = logging.INFO, **kwargs)
Log a message but don't put a line prefix on it.
Arguments:
level(int): Alternate log level at which to set the indented
message. Defaults to :const:`logging.INFO`.
"""
level = self._getlevel(kwargs.pop('level', logging.INFO))
self._log(level, message, args, kwargs, 'indented')
def success(self, message, *args, **kwargs):
"""success(message, *args, **kwargs)
Logs a success message.
"""
self._log(logging.INFO, message, args, kwargs, 'success')
def failure(self, message, *args, **kwargs):
"""failure(message, *args, **kwargs)
Logs a failure message.
"""
self._log(logging.INFO, message, args, kwargs, 'failure')
def info_once(self, message, *args, **kwargs):
"""info_once(message, *args, **kwargs)
Logs an info message. The same message is never printed again.
"""
m = message % args
if m not in self._one_time_infos:
if self.isEnabledFor(logging.INFO):
self._one_time_infos.add(m)
self._log(logging.INFO, message, args, kwargs, 'info_once')
def warning_once(self, message, *args, **kwargs):
"""warning_once(message, *args, **kwargs)
Logs a warning message. The same message is never printed again.
"""
m = message % args
if m not in self._one_time_warnings:
if self.isEnabledFor(logging.WARNING):
self._one_time_warnings.add(m)
self._log(logging.WARNING, message, args, kwargs, 'warning_once')
def warn_once(self, *args, **kwargs):
"""Alias for :meth:`warning_once`."""
return self.warning_once(*args, **kwargs)
# logging functions also exposed by `logging.Logger`
def debug(self, message, *args, **kwargs):
"""debug(message, *args, **kwargs)
Logs a debug message.
"""
self._log(logging.DEBUG, message, args, kwargs, 'debug')
def info(self, message, *args, **kwargs):
"""info(message, *args, **kwargs)
Logs an info message.
"""
self._log(logging.INFO, message, args, kwargs, 'info')
def hexdump(self, message, *args, **kwargs):
# cyclic dependencies FTW!
# TODO: Move pwnlib.util.fiddling.hexdump into a new module.
import pwnlib.util.fiddling
self.info(pwnlib.util.fiddling.hexdump(message, *args, **kwargs))
def warning(self, message, *args, **kwargs):
"""warning(message, *args, **kwargs)
Logs a warning message.
"""
self._log(logging.WARNING, message, args, kwargs, 'warning')
def warn(self, *args, **kwargs):
"""Alias for :meth:`warning`."""
return self.warning(*args, **kwargs)
def error(self, message, *args, **kwargs):
"""error(message, *args, **kwargs)
To be called outside an exception handler.
Logs an error message, then raises a ``PwnlibException``.
"""
self._log(logging.ERROR, message, args, kwargs, 'error')
raise PwnlibException(message % args)
def exception(self, message, *args, **kwargs):
"""exception(message, *args, **kwargs)
To be called from an exception handler.
Logs an error message, then re-raises the current exception.
"""
kwargs["exc_info"] = 1
self._log(logging.ERROR, message, args, kwargs, 'exception')
raise
def critical(self, message, *args, **kwargs):
"""critical(message, *args, **kwargs)
Logs a critical message.
"""
self._log(logging.CRITICAL, message, args, kwargs, 'critical')
def log(self, level, message, *args, **kwargs):
"""log(level, message, *args, **kwargs)
Logs a message with log level `level`. The ``pwnlib`` formatter will
use the default :mod:`logging` formatter to format this message.
"""
self._log(level, message, args, kwargs, None)
def isEnabledFor(self, level):
"""isEnabledFor(level) -> bool
See if the underlying logger is enabled for the specified level.
"""
effectiveLevel = self._logger.getEffectiveLevel()
if effectiveLevel == 1:
effectiveLevel = context.log_level
return effectiveLevel <= level
def setLevel(self, level):
"""setLevel(level)
Set the logging level for the underlying logger.
"""
with context.local(log_level=level):
self._logger.setLevel(context.log_level)
def addHandler(self, handler):
"""addHandler(handler)
Add the specified handler to the underlying logger.
"""
self._logger.addHandler(handler)
def removeHandler(self, handler):
"""removeHandler(handler)
Remove the specified handler from the underlying logger.
"""
self._logger.removeHandler(handler)
@property
def level(self):
return self._logger.level
@level.setter
def level(self, value):
with context.local(log_level=value):
self._logger.level = context.log_level
class Handler(logging.StreamHandler):
"""
A custom handler class. This class will report whatever
:data:`context.log_level` is currently set to as its log level.
If :data:`term.term_mode` is enabled log records originating from a progress
logger will not be emitted but rather an animated progress line will be
created.
An instance of this handler is added to the ``'pwnlib'`` logger.
"""
@property
def stream(self):
return context.log_console
@stream.setter
def stream(self, value):
pass
def emit(self, record):
"""
Emit a log record or create/update an animated progress logger
depending on whether :data:`term.term_mode` is enabled.
"""
# We have set the root 'pwnlib' logger to have a logLevel of 1,
# when logging has been enabled via install_default_handler.
#
# If the level is 1, we should only process the record if
# context.log_level is less than the record's log level.
#
# If the level is not 1, somebody else expressly set the log
# level somewhere on the tree, and we should use that value.
level = logging.getLogger(record.name).getEffectiveLevel()
if level == 1:
level = context.log_level
if level > record.levelno:
return
progress = getattr(record, 'pwnlib_progress', None)
# if the record originates from a `Progress` object and term handling
# is enabled we can have animated spinners! so check that
if progress is None or not term.term_mode:
super(Handler, self).emit(record)
return
# yay, spinners!
# since we want to be able to update the spinner we overwrite the
# message type so that the formatter doesn't output a prefix symbol
msgtype = record.pwnlib_msgtype
record.pwnlib_msgtype = 'animated'
msg = "%s\n" % self.format(record)
# we enrich the `Progress` object to keep track of the spinner
if not hasattr(progress, '_spinner_handle'):
spinner_handle = term.output('')
msg_handle = term.output(msg)
stop = threading.Event()
def spin():
'''Wheeeee!'''
state = 0
states = random.choice(spinners.spinners)
while True:
prefix = '[%s] ' % _spinner_style(states[state])
spinner_handle.update(prefix)
state = (state + 1) % len(states)
if stop.wait(0.1):
break
t = Thread(target = spin)
t.daemon = True
t.start()
progress._spinner_handle = spinner_handle
progress._msg_handle = msg_handle
progress._stop_event = stop
progress._spinner_thread = t
else:
progress._msg_handle.update(msg)
# if the message type was not a status message update, then we should
# stop the spinner
if msgtype != 'status':
progress._stop_event.set()
progress._spinner_thread.join()
style, symb = _msgtype_prefixes[msgtype]
prefix = '[%s] ' % style(symb)
progress._spinner_handle.update(prefix)
class Formatter(logging.Formatter):
"""
Logging formatter which performs custom formatting for log records
containing the ``'pwnlib_msgtype'`` attribute. Other records are formatted
using the `logging` modules default formatter.
If ``'pwnlib_msgtype'`` is set, it performs the following actions:
* A prefix looked up in `_msgtype_prefixes` is prepended to the message.
* The message is prefixed such that it starts on column four.
* If the message spans multiple lines they are split, and all subsequent
lines are indented.
This formatter is used by the handler installed on the ``'pwnlib'`` logger.
"""
# Indentation from the left side of the terminal.
# All log messages will be indented at least this far.
indent = ' '
# Newline, followed by an indent. Used to wrap multiple lines.
nlindent = '\n' + indent
def format(self, record):
# use the default formatter to actually format the record
msg = super(Formatter, self).format(record)
# then put on a prefix symbol according to the message type
msgtype = getattr(record, 'pwnlib_msgtype', None)
# if 'pwnlib_msgtype' is not set (or set to `None`) we just return the
# message as it is
if msgtype is None:
return msg
if msgtype in _msgtype_prefixes:
style, symb = _msgtype_prefixes[msgtype]
prefix = '[%s] ' % style(symb)
elif msgtype == 'indented':
prefix = self.indent
elif msgtype == 'animated':
# the handler will take care of updating the spinner, so we will
# not include it here
prefix = ''
else:
# this should never happen
prefix = '[?] '
msg = prefix + msg
msg = self.nlindent.join(msg.splitlines())
return msg
# we keep a dictionary of loggers such that multiple calls to `getLogger` with
# the same name will return the same logger
def getLogger(name):
return Logger(logging.getLogger(name))
class LogfileHandler(logging.FileHandler):
def __init__(self):
super(LogfileHandler, self).__init__('', delay=1)
@property
def stream(self):
return context.log_file
@stream.setter
def stream(self, value):
pass
def handle(self, *a, **kw):
if self.stream.name is not None:
super(LogfileHandler, self).handle(*a, **kw)
iso_8601 = '%Y-%m-%dT%H:%M:%S'
fmt = '%(asctime)s:%(levelname)s:%(name)s:%(message)s'
log_file = LogfileHandler()
log_file.setFormatter(logging.Formatter(fmt, iso_8601))
#
# The root 'pwnlib' logger is declared here. To change the target of all
# 'pwntools'-specific logging, only this logger needs to be changed.
#
# Logging cascades upward through the hierarchy,
# so the only point that should ever need to be
# modified is the root 'pwnlib' logger.
#
# For example:
# map(rootlogger.removeHandler, rootlogger.handlers)
# logger.addHandler(myCoolPitchingHandler)
#
rootlogger = getLogger('pwnlib')
console = Handler()
formatter = Formatter()
console.setFormatter(formatter)
def install_default_handler():
'''install_default_handler()
Instantiates a :class:`Handler` and :class:`Formatter` and installs them for
the ``pwnlib`` root logger. This function is automatically called when
importing :mod:`pwn`.
'''
logger = logging.getLogger('pwnlib')
if console not in logger.handlers:
logger.addHandler(console)
logger.addHandler(log_file)
logger.setLevel(1)
| [
[
[
3663,
3678
]
],
[
[
3702,
3710
]
],
[
[
3719,
3726
],
[
16940,
16947
],
[
20278,
20285
],
[
22353,
22360
],
[
22858,
22865
],
[
9829,
9836
],
[
10031,
10038
],
[
11415,
11422
],
[
12029,
12036
],
[
12262,
12269
],
[
12468,
12475
],
[
12802,
12809
],
[
12883,
12890
],
[
13230,
13237
],
[
13317,
13324
],
[
13722,
13729
],
[
13919,
13926
],
[
14388,
14395
],
[
14803,
14810
],
[
15174,
15181
],
[
15400,
15407
],
[
18115,
18122
],
[
22306,
22313
],
[
23694,
23701
]
],
[
[
3734,
3736
]
],
[
[
3744,
3750
],
[
19261,
19267
]
],
[
[
3758,
3760
]
],
[
[
3768,
3771
],
[
6786,
6789
],
[
9965,
9968
],
[
10231,
10234
]
],
[
[
3779,
3782
]
],
[
[
3790,
3799
],
[
19137,
19146
]
],
[
[
3807,
3811
],
[
7429,
7433
]
],
[
[
3832,
3836
],
[
18522,
18526
],
[
19060,
19064
],
[
19101,
19105
]
],
[
[
3863,
3878
],
[
5436,
5451
]
],
[
[
3906,
3912
],
[
19580,
19586
]
],
[
[
3940,
3947
],
[
16052,
16059
],
[
16251,
16258
],
[
16317,
16324
],
[
16841,
16848
],
[
16906,
16913
],
[
17401,
17408
],
[
18209,
18216
],
[
22508,
22515
]
],
[
[
3977,
3992
],
[
14864,
14879
]
],
[
[
4017,
4025
],
[
19275,
19283
]
],
[
[
4050,
4054
],
[
4343,
4347
],
[
4385,
4389
],
[
4430,
4434
],
[
4473,
4477
],
[
4520,
4524
],
[
4564,
4568
],
[
4610,
4614
],
[
4655,
4659
],
[
4700,
4704
],
[
4748,
4752
],
[
4792,
4796
],
[
5614,
5618
],
[
5232,
5236
]
],
[
[
4056,
4063
]
],
[
[
4299,
4316
],
[
5150,
5167
],
[
5192,
5209
],
[
5297,
5314
],
[
20139,
20156
],
[
21609,
21626
],
[
21654,
21671
]
],
[
[
4828,
4843
],
[
5459,
5474
]
],
[
[
5597,
5611
],
[
19361,
19375
]
],
[
[
5636,
5644
],
[
11445,
11453
]
],
[
[
8540,
8546
],
[
22299,
22305
]
],
[
[
16932,
16939
],
[
23345,
23352
],
[
18556,
18563
]
],
[
[
20268,
20277
],
[
23367,
23376
],
[
21268,
21277
]
],
[
[
22271,
22280
],
[
23313,
23322
],
[
4865,
4874
]
],
[
[
22338,
22352
],
[
22819,
22833
],
[
22413,
22427
],
[
22677,
22691
]
],
[
[
22717,
22725
],
[
22881,
22889
]
],
[
[
22748,
22751
],
[
22876,
22879
]
],
[
[
22808,
22816
],
[
22836,
22844
],
[
23823,
23831
]
],
[
[
23300,
23310
]
],
[
[
23333,
23340
],
[
23379,
23386
],
[
23730,
23737
],
[
23788,
23795
]
],
[
[
23355,
23364
],
[
23400,
23409
]
],
[
[
23416,
23439
]
]
] |
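To make the workflow described in the module docstring above concrete, here is a short, self-contained sketch of the exploit-developer API. It assumes a standard pwntools install and uses only the names the docstring and the Logger/Progress classes above already define.

from pwn import *              # provides `log` and `context`
import time

context.log_level = 'info'     # controls what reaches the screen
log.info('Hello, world!')      # prints "[*] Hello, world!"

# progress loggers are context managers, so the spinner stops on exceptions
with log.progress('Working') as p:
    p.status('Reticulating splines')
    time.sleep(1)
    p.success('Got a shell!')

# module-specific logger, as recommended for pwnlib developers
from pwnlib.log import getLogger
module_log = getLogger(__name__)
module_log.warning_once('printed only once per message')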
import rows
import os
from timeit import default_timer
import json
output_path = '../package/data/'
class Brasilio(object):
def __init__(self, output_path='../package/data/', verbose=False):
self.verbose = verbose
self.output_path = output_path
self.timer = default_timer
def __enter__(self):
# Creates the package directory
if not os.path.exists(self.output_path):
os.makedirs(self.output_path)
# Creates an empty resources.json
json.dump([], open("resources.json", "w"), indent=2)
# Start Timer
self.start = self.timer()
return self
def __exit__(self, *args):
# Creates the datapackage
create_datapackage(self.output_path, verbose=False)
# End Timer
end = self.timer()
self.elapsed_secs = end - self.start
self.elapsed = self.elapsed_secs  # seconds
if self.verbose:
print('Sucesso!\n Sua captura demorou: {0:.2f} s'.format(self.elapsed))
def generate_resources(filename, verbose=False):
data_path = os.path.join(output_path, filename)
if verbose:
print('Reading Data')
data = rows.import_from_csv(data_path)
translate = {int: 'integer',
str: 'string'}
resource = {'format': "csv",
"url": "http://brasil.io/dataset/{}?format=csv".format(filename.split('.')[0]),
"path": data_path,
"profile": "tabular-data-resource",
'schema': {
'fields': []}
}
for i, field in enumerate(data.field_names):
resource['schema']['fields'].append({'name': field,
'type': translate[data.field_types[i].TYPE[0]]})
if verbose:
print('Writing resources.json')
# print(type(resources))
# print(json.dumps(resources))
resources = json.load(open("resources.json", "r"))
resources.append(resource)
json.dump(resources, open("resources.json", "w"), indent=2)
def create_datapackage(output_path, verbose=False):
# Create the datapackage.json
if verbose:
print("Criando datapackage.json")
with open("metadata.json", "r") as mfd:
output = json.load(mfd)
with open("resources.json", "r") as rfd:
output['resources'] = json.load(rfd)
with open("../package/datapackage.json", "w") as datapackage:
json.dump(output, datapackage, indent=2)
if __name__ == '__main__':
pass | [
[
[
7,
11
],
[
1223,
1227
]
],
[
[
19,
21
],
[
394,
396
],
[
440,
442
],
[
1127,
1129
]
],
[
[
41,
54
],
[
288,
301
]
],
[
[
62,
66
],
[
520,
524
],
[
1977,
1981
],
[
2051,
2055
],
[
2320,
2324
],
[
2419,
2423
],
[
2525,
2529
]
],
[
[
68,
79
],
[
1140,
1151
]
],
[
[
108,
116
]
],
[
[
1065,
1083
]
],
[
[
2117,
2135
],
[
751,
769
]
]
] |
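A hedged sketch of how the capture helper above appears intended to be used: the CSV name is hypothetical, and the snippet assumes it runs in the same module (or imports Brasilio and generate_resources from it) with a metadata.json already present, since __exit__ builds datapackage.json from resources.json.

# hypothetical capture script using the helpers defined above
with Brasilio(output_path='../package/data/', verbose=True):
    # ... scrape the data here and write gastos.csv (hypothetical name)
    # into the output directory ...
    generate_resources('gastos.csv', verbose=True)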
#!/usr/bin/env python3
import os
import pathlib
import sys
import subprocess
def has_cargo_fmt():
"""Runs a quick check to see if cargo fmt is installed."""
try:
c = subprocess.run(["cargo", "fmt", "--", "--help"], capture_output=True)
except OSError:
return False
else:
return c.returncode == 0
def get_modified_files():
"""Returns a list of all modified files."""
c = subprocess.run(
["git", "diff-index", "--cached", "--name-only", "HEAD"], capture_output=True
)
return [pathlib.Path(os.fsdecode(p)) for p in c.stdout.splitlines()]
def run_format_check(files):
rust_files = [x for x in files if x.suffix == ".rs" and x.is_file()]
if not rust_files:
return 0
ret = subprocess.run(
["cargo", "fmt", "--", "--check", "--color=always"] + rust_files
)
if ret.returncode != 0:
print("", file=sys.stderr)
print(
"\033[1m\033[2minfo: to fix this run `cargo fmt --all` and "
"commit again\033[0m",
file=sys.stderr,
)
return ret.returncode
def main():
if not has_cargo_fmt():
print("warning: cargo fmt not installed")
return
sys.exit(run_format_check(get_modified_files()))
if __name__ == "__main__":
main()
| [
[
[
30,
32
],
[
555,
557
]
],
[
[
40,
47
],
[
542,
549
]
],
[
[
55,
58
],
[
901,
904
],
[
1053,
1056
],
[
1212,
1215
]
],
[
[
66,
76
],
[
184,
194
],
[
422,
432
],
[
755,
765
]
],
[
[
83,
96
],
[
1126,
1139
]
],
[
[
344,
362
],
[
1238,
1256
]
],
[
[
609,
625
],
[
1221,
1237
]
],
[
[
1107,
1111
],
[
1294,
1298
]
]
] |
import os
import distutils.spawn
import mpi4py
from mpi4py import MPI
def check_mpi():
mpiexec_path, _ = os.path.split(distutils.spawn.find_executable("mpiexec"))
for executable, path in mpi4py.get_config().items():
if executable not in ['mpicc', 'mpicxx', 'mpif77', 'mpif90', 'mpifort']:
continue
if mpiexec_path not in path:
raise ImportError("mpi4py may not be configured against the same version of 'mpiexec' that you are using. The 'mpiexec' path is {mpiexec_path} and mpi4py.get_config() returns:\n{mpi4py_config}\n".format(mpiexec_path=mpiexec_path, mpi4py_config=mpi4py.get_config()))
if 'Open MPI' not in MPI.get_vendor():
raise ImportError("mpi4py must have been installed against Open MPI in order for StructOpt to function correctly.")
vendor_number = ".".join([str(x) for x in MPI.get_vendor()[1]])
if vendor_number not in mpiexec_path:
raise ImportError("The MPI version that mpi4py was compiled against does not match the version of 'mpiexec'. mpi4py's version number is {}, and mpiexec's path is {}".format(MPI.get_vendor(), mpiexec_path))
check_mpi()
| [
[
[
7,
9
],
[
110,
112
]
],
[
[
17,
32
],
[
124,
133
]
],
[
[
40,
46
],
[
196,
202
],
[
620,
626
]
],
[
[
66,
69
],
[
667,
670
],
[
855,
858
],
[
1100,
1103
]
],
[
[
75,
84
],
[
1134,
1143
]
]
] |
import queue
import time
import numpy as np
class CameraInformation:
def __init__(self, cam_id: str):
self._frame_queue: queue.Queue = queue.Queue(maxsize=1)
self._frame_shape = None
self._last_frame_time = None
self.is_online = True
self.node_id = cam_id
def write_frame(self, frame):
try:
self._frame_queue.get_nowait()
except queue.Empty:
pass
self._frame_shape = frame.shape
self._last_frame_time = time.time()
self._frame_queue.put_nowait(frame)
def read_frame(self,):
try:
frame = self._frame_queue.get(timeout=2)
if not self.is_online:
self.is_online = True
return frame
except queue.Empty:
if self.is_online:
self.is_online = False
return np.zeros(self._frame_shape)
| [
[
[
7,
12
],
[
149,
154
],
[
135,
140
],
[
408,
413
],
[
773,
778
]
],
[
[
20,
24
],
[
510,
514
]
],
[
[
32,
43
],
[
875,
877
]
],
[
[
52,
69
]
]
] |
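A minimal producer/consumer sketch for the single-slot frame buffer above: write_frame always replaces any unread frame, and read_frame falls back to a zero frame (marking the camera offline) if nothing arrives within the two-second timeout. The camera id, frame shape and thread layout are illustrative only, and the snippet assumes CameraInformation is importable from the module above.

import threading
import time

import numpy as np

cam = CameraInformation('cam-0')

def producer():
    # simulate a camera pushing frames; each write replaces any unread frame
    for _ in range(5):
        cam.write_frame(np.zeros((480, 640, 3), dtype=np.uint8))
        time.sleep(0.1)

threading.Thread(target=producer, daemon=True).start()

for _ in range(5):
    frame = cam.read_frame()   # blocks up to 2 s; zeros + offline flag on timeout
    print(frame.shape, cam.is_online)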
# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT
from __future__ import unicode_literals
from ..utils import Generate5tt
def test_Generate5tt_inputs():
input_map = dict(
algorithm=dict(
argstr='%s',
mandatory=True,
position=-3,
),
args=dict(argstr='%s', ),
bval_scale=dict(argstr='-bvalue_scaling %s', ),
environ=dict(
nohash=True,
usedefault=True,
),
grad_file=dict(
argstr='-grad %s',
extensions=None,
xor=['grad_fsl'],
),
grad_fsl=dict(
argstr='-fslgrad %s %s',
xor=['grad_file'],
),
in_bval=dict(extensions=None, ),
in_bvec=dict(
argstr='-fslgrad %s %s',
extensions=None,
),
in_file=dict(
argstr='%s',
extensions=None,
mandatory=True,
position=-2,
),
nthreads=dict(
argstr='-nthreads %d',
nohash=True,
),
out_file=dict(
argstr='%s',
extensions=None,
mandatory=True,
position=-1,
),
)
inputs = Generate5tt.input_spec()
for key, metadata in list(input_map.items()):
for metakey, value in list(metadata.items()):
assert getattr(inputs.traits()[key], metakey) == value
def test_Generate5tt_outputs():
output_map = dict(out_file=dict(extensions=None, ), )
outputs = Generate5tt.output_spec()
for key, metadata in list(output_map.items()):
for metakey, value in list(metadata.items()):
assert getattr(outputs.traits()[key], metakey) == value
| [
[
[
77,
93
]
],
[
[
114,
125
],
[
1232,
1243
],
[
1533,
1544
]
],
[
[
132,
155
]
],
[
[
1433,
1457
]
]
] |
#!/usr/bin/env python
from setuptools import setup
def readme():
with open('README.md') as f:
return f.read()
setup(
name='impasse',
# Version chosen for parity with Assimp since we need ABI compatibility
version='5.0.6',
license='BSD',
description='Alternate Python bindings for the Open Asset Import Library (ASSIMP)',
long_description=readme(),
long_description_content_type="text/markdown",
url='https://github.com/SaladDais/Impasse',
author='Salad Dais',
author_email='SaladDais@users.noreply.github.com',
packages=['impasse'],
data_files=[
('share/impasse', ['README.md']),
# TODO: Make these proper console scripts
# ('share/examples/impasse', ['scripts/' + f for f in os.listdir('scripts/')]),
],
install_requires=['numpy', 'cffi'],
python_requires='>=3.7',
zip_safe=False,
tests_require=[
"pytest",
],
test_suite='tests',
)
| [
[
[
46,
51
],
[
127,
132
]
],
[
[
58,
64
],
[
379,
385
]
]
] |
from __future__ import absolute_import
from __future__ import print_function
import sys
import os
# the next line can be removed after installation
sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.dirname(
os.path.dirname(os.path.dirname(os.path.abspath(__file__)))))))
from veriloggen import *
import veriloggen.thread as vthread
import veriloggen.types.axi as axi
def mkLed():
m = Module('blinkled')
clk = m.Input('CLK')
rst = m.Input('RST')
datawidth = 32
addrwidth = 10
myaxi = vthread.AXIM(m, 'myaxi', clk, rst, datawidth)
ram_a = vthread.RAM(m, 'ram_a', clk, rst, datawidth, addrwidth)
ram_b = vthread.RAM(m, 'ram_b', clk, rst, datawidth, addrwidth)
ram_c = vthread.RAM(m, 'ram_c', clk, rst, datawidth, addrwidth)
mulstrm = vthread.Stream(m, 'mul_stream', clk, rst)
mulx = mulstrm.source('x')
muly = mulstrm.source('y')
mulz = mulx * muly
mulstrm.sink(mulz, 'z')
macstrm = vthread.Stream(m, 'mac_stream', clk, rst)
a = macstrm.source('a')
b = macstrm.source('b')
a = a + 1
b = b + 1
sub = macstrm.substream(mulstrm)
sub.to_source('x', a)
sub.to_source('y', b)
c = sub.from_sink('z')
size = macstrm.parameter('size')
sum, sum_valid = macstrm.ReduceAddValid(c, size)
macstrm.sink(sum, 'sum', when=sum_valid, when_name='sum_valid')
actstrm = vthread.Stream(m, 'act_stream', clk, rst)
a = actstrm.source('a')
b = actstrm.source('b')
a = a + 1
b = b + 1
a = a + 1
b = b + 1
sub = actstrm.substream(mulstrm)
sub.to_source('x', a)
sub.to_source('y', b)
c = sub.from_sink('z')
size = actstrm.parameter('size')
sum, sum_valid = actstrm.ReduceAddValid(c, size)
sum = actstrm.Mux(sum > 0, sum, 0)
actstrm.sink(sum, 'sum', when=sum_valid, when_name='sum_valid')
all_ok = m.TmpReg(initval=0)
def comp_stream_mul(size, offset):
mulstrm.set_source('x', ram_a, offset, size)
mulstrm.set_source('y', ram_b, offset, size)
mulstrm.set_sink('z', ram_c, offset, size)
mulstrm.run()
mulstrm.join()
def comp_stream_mac(size, offset):
macstrm.set_source('a', ram_a, offset, size)
macstrm.set_source('b', ram_b, offset, size)
macstrm.set_parameter('size', size)
macstrm.set_sink('sum', ram_c, offset, 1)
macstrm.run()
macstrm.join()
def comp_stream_act(size, offset):
actstrm.set_source('a', ram_a, offset, size)
actstrm.set_source('b', ram_b, offset, size)
actstrm.set_parameter('size', size)
actstrm.set_sink('sum', ram_c, offset, 1)
actstrm.run()
actstrm.join()
def comp_sequential_mul(size, offset):
sum = 0
for i in range(size):
a = ram_a.read(i + offset)
b = ram_b.read(i + offset)
sum = a * b
ram_c.write(i + offset, sum)
def comp_sequential_mac(size, offset):
sum = 0
for i in range(size):
a = ram_a.read(i + offset) + 1
b = ram_b.read(i + offset) + 1
sum += a * b
ram_c.write(offset, sum)
def comp_sequential_act(size, offset):
sum = 0
for i in range(size):
a = ram_a.read(i + offset) + 1 + 1
b = ram_b.read(i + offset) + 1 + 1
sum += a * b
if sum <= 0:
sum = 0
ram_c.write(offset, sum)
def check(size, offset_stream, offset_seq):
for i in range(size):
st = ram_c.read(i + offset_stream)
sq = ram_c.read(i + offset_seq)
if vthread.verilog.NotEql(st, sq):
all_ok.value = False
if all_ok:
print('# verify: PASSED')
else:
print('# verify: FAILED')
def comp(size):
all_ok.value = True
# mul
# stream
offset = 0
myaxi.dma_read(ram_a, offset, 0, size)
myaxi.dma_read(ram_b, offset, 512, size)
comp_stream_mul(size, offset)
myaxi.dma_write(ram_c, offset, 1024, size)
# sequential
offset = size
myaxi.dma_read(ram_a, offset, 0, size)
myaxi.dma_read(ram_b, offset, 512, size)
comp_sequential_mul(size, offset)
myaxi.dma_write(ram_c, offset, 1024 * 2, size)
# verification
print('# MUL')
check(size, 0, offset)
# mac
# stream
offset = 0
myaxi.dma_read(ram_a, offset, 0, size)
myaxi.dma_read(ram_b, offset, 512, size)
comp_stream_mac(size, offset)
myaxi.dma_write(ram_c, offset, 1024, 1)
# sequential
offset = size
myaxi.dma_read(ram_a, offset, 0, size)
myaxi.dma_read(ram_b, offset, 512, size)
comp_sequential_mac(size, offset)
myaxi.dma_write(ram_c, offset, 1024 * 2, 1)
# verification
print('# MAC')
check(1, 0, offset)
# act
# stream
offset = 0
myaxi.dma_read(ram_a, offset, 0, size)
myaxi.dma_read(ram_b, offset, 512, size)
comp_stream_act(size, offset)
myaxi.dma_write(ram_c, offset, 1024, 1)
# sequential
offset = size
myaxi.dma_read(ram_a, offset, 0, size)
myaxi.dma_read(ram_b, offset, 512, size)
comp_sequential_act(size, offset)
myaxi.dma_write(ram_c, offset, 1024 * 2, 1)
# verification
print('# ACT')
check(1, 0, offset)
vthread.finish()
th = vthread.Thread(m, 'th_comp', clk, rst, comp)
fsm = th.start(32)
try:
actstrm.draw_graph()
except:
pass
return m
def mkTest(memimg_name=None):
m = Module('test')
# target instance
led = mkLed()
# copy params and ports
params = m.copy_params(led)
ports = m.copy_sim_ports(led)
clk = ports['CLK']
rst = ports['RST']
memory = axi.AxiMemoryModel(m, 'memory', clk, rst, memimg_name=memimg_name)
memory.connect(ports, 'myaxi')
uut = m.Instance(led, 'uut',
params=m.connect_params(led),
ports=m.connect_ports(led))
#simulation.setup_waveform(m, uut)
simulation.setup_clock(m, clk, hperiod=5)
init = simulation.setup_reset(m, rst, m.make_reset(), period=100)
init.add(
Delay(1000000),
Systask('finish'),
)
return m
def run(filename='tmp.v', simtype='iverilog', outputfile=None):
if outputfile is None:
outputfile = os.path.splitext(os.path.basename(__file__))[0] + '.out'
memimg_name = 'memimg_' + outputfile
test = mkTest(memimg_name=memimg_name)
if filename is not None:
test.to_verilog(filename)
sim = simulation.Simulator(test, sim=simtype)
rslt = sim.run(outputfile=outputfile)
lines = rslt.splitlines()
if simtype == 'iverilog' or (simtype == 'verilator' and lines[-1].startswith('-')):
rslt = '\n'.join(lines[:-1])
return rslt
if __name__ == '__main__':
rslt = run(filename='tmp.v')
print(rslt)
| [
[
[
23,
38
]
],
[
[
62,
76
]
],
[
[
84,
87
],
[
149,
152
]
],
[
[
95,
97
],
[
168,
170
],
[
184,
186
],
[
200,
202
],
[
221,
223
],
[
237,
239
],
[
253,
255
],
[
6511,
6513
],
[
6528,
6530
]
],
[
[
309,
310
],
[
405,
411
],
[
5707,
5713
],
[
6198,
6208
],
[
6251,
6261
],
[
6333,
6338
],
[
6357,
6364
],
[
6729,
6739
]
],
[
[
318,
346
],
[
525,
532
],
[
583,
590
],
[
651,
658
],
[
719,
726
],
[
790,
797
],
[
960,
967
],
[
1375,
1382
],
[
5521,
5528
],
[
3623,
3630
],
[
5494,
5501
]
],
[
[
354,
381
],
[
5918,
5921
]
],
[
[
388,
393
],
[
5755,
5760
]
],
[
[
5673,
5679
],
[
6622,
6628
]
],
[
[
6402,
6405
],
[
7022,
7025
]
],
[
[
7015,
7019
],
[
7054,
7058
]
]
] |
# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0
# For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt
"""Tests for coverage.numbits"""
import json
import sqlite3
from hypothesis import example, given, settings
from hypothesis.strategies import sets, integers
from coverage import env
from coverage.numbits import (
nums_to_numbits, numbits_to_nums, numbits_union, numbits_intersection,
numbits_any_intersection, num_in_numbits, register_sqlite_functions,
)
from tests.coveragetest import CoverageTest
# Hypothesis-generated line number data
line_numbers = integers(min_value=1, max_value=9999)
line_number_sets = sets(line_numbers)
# When coverage-testing ourselves, hypothesis complains about a test being
# flaky because the first run exceeds the deadline (and fails), and the second
# run succeeds. Disable the deadline if we are coverage-testing.
default_settings = settings()
if env.METACOV:
default_settings = settings(default_settings, deadline=None)
def good_numbits(numbits):
"""Assert that numbits is good."""
# It shouldn't end with a zero byte, that should have been trimmed off.
assert (not numbits) or (numbits[-1] != 0)
class NumbitsOpTest(CoverageTest):
"""Tests of the numbits operations in numbits.py."""
run_in_temp_dir = False
@given(line_number_sets)
@settings(default_settings)
def test_conversion(self, nums):
numbits = nums_to_numbits(nums)
good_numbits(numbits)
nums2 = numbits_to_nums(numbits)
assert nums == set(nums2)
@given(line_number_sets, line_number_sets)
@settings(default_settings)
def test_union(self, nums1, nums2):
nb1 = nums_to_numbits(nums1)
good_numbits(nb1)
nb2 = nums_to_numbits(nums2)
good_numbits(nb2)
nbu = numbits_union(nb1, nb2)
good_numbits(nbu)
union = numbits_to_nums(nbu)
assert nums1 | nums2 == set(union)
@given(line_number_sets, line_number_sets)
@settings(default_settings)
def test_intersection(self, nums1, nums2):
nb1 = nums_to_numbits(nums1)
good_numbits(nb1)
nb2 = nums_to_numbits(nums2)
good_numbits(nb2)
nbi = numbits_intersection(nb1, nb2)
good_numbits(nbi)
intersection = numbits_to_nums(nbi)
assert nums1 & nums2 == set(intersection)
@given(line_number_sets, line_number_sets)
@settings(default_settings)
def test_any_intersection(self, nums1, nums2):
nb1 = nums_to_numbits(nums1)
good_numbits(nb1)
nb2 = nums_to_numbits(nums2)
good_numbits(nb2)
inter = numbits_any_intersection(nb1, nb2)
expect = bool(nums1 & nums2)
assert expect == bool(inter)
@given(line_numbers, line_number_sets)
@settings(default_settings)
@example(152, {144})
def test_num_in_numbits(self, num, nums):
numbits = nums_to_numbits(nums)
good_numbits(numbits)
is_in = num_in_numbits(num, numbits)
assert (num in nums) == is_in
class NumbitsSqliteFunctionTest(CoverageTest):
"""Tests of the SQLite integration for numbits functions."""
run_in_temp_dir = False
def setup_test(self):
super().setup_test()
conn = sqlite3.connect(":memory:")
register_sqlite_functions(conn)
self.cursor = conn.cursor()
self.cursor.execute("create table data (id int, numbits blob)")
self.cursor.executemany(
"insert into data (id, numbits) values (?, ?)",
[
(i, nums_to_numbits(range(i, 100, i)))
for i in range(1, 11)
]
)
self.addCleanup(self.cursor.close)
def test_numbits_union(self):
res = self.cursor.execute(
"select numbits_union(" +
"(select numbits from data where id = 7)," +
"(select numbits from data where id = 9)" +
")"
)
expected = [
7, 9, 14, 18, 21, 27, 28, 35, 36, 42, 45, 49,
54, 56, 63, 70, 72, 77, 81, 84, 90, 91, 98, 99,
]
answer = numbits_to_nums(list(res)[0][0])
assert expected == answer
def test_numbits_intersection(self):
res = self.cursor.execute(
"select numbits_intersection(" +
"(select numbits from data where id = 7)," +
"(select numbits from data where id = 9)" +
")"
)
answer = numbits_to_nums(list(res)[0][0])
assert [63] == answer
def test_numbits_any_intersection(self):
res = self.cursor.execute(
"select numbits_any_intersection(?, ?)",
(nums_to_numbits([1, 2, 3]), nums_to_numbits([3, 4, 5]))
)
answer = [any_inter for (any_inter,) in res]
assert [1] == answer
res = self.cursor.execute(
"select numbits_any_intersection(?, ?)",
(nums_to_numbits([1, 2, 3]), nums_to_numbits([7, 8, 9]))
)
answer = [any_inter for (any_inter,) in res]
assert [0] == answer
def test_num_in_numbits(self):
res = self.cursor.execute("select id, num_in_numbits(12, numbits) from data order by id")
answer = [is_in for (id, is_in) in res]
assert [1, 1, 1, 1, 0, 1, 0, 0, 0, 0] == answer
def test_numbits_to_nums(self):
res = self.cursor.execute("select numbits_to_nums(?)", [nums_to_numbits([1, 2, 3])])
assert [1, 2, 3] == json.loads(res.fetchone()[0])
| [
[
[
197,
201
],
[
5526,
5530
]
],
[
[
209,
216
],
[
3297,
3304
]
],
[
[
241,
248
],
[
2864,
2871
]
],
[
[
250,
255
],
[
1355,
1360
],
[
1599,
1604
],
[
1989,
1994
],
[
2407,
2412
],
[
2789,
2794
]
],
[
[
257,
265
],
[
943,
951
],
[
993,
1001
],
[
1384,
1392
],
[
1646,
1654
],
[
2036,
2044
],
[
2454,
2462
],
[
2832,
2840
]
],
[
[
300,
304
],
[
684,
688
]
],
[
[
306,
314
],
[
627,
635
]
],
[
[
337,
340
],
[
957,
960
]
],
[
[
376,
391
],
[
1466,
1481
],
[
1727,
1742
],
[
1790,
1805
],
[
2124,
2139
],
[
2187,
2202
],
[
2546,
2561
],
[
2609,
2624
],
[
2948,
2963
],
[
3600,
3615
],
[
4732,
4747
],
[
4760,
4775
],
[
4982,
4997
],
[
5010,
5025
],
[
5469,
5484
]
],
[
[
393,
408
],
[
1534,
1549
],
[
1919,
1934
],
[
2330,
2345
],
[
4165,
4180
],
[
4522,
4537
]
],
[
[
410,
423
],
[
1853,
1866
]
],
[
[
425,
445
],
[
2250,
2270
]
],
[
[
451,
475
],
[
2674,
2698
]
],
[
[
477,
491
],
[
3016,
3030
]
],
[
[
493,
518
],
[
3333,
3358
]
],
[
[
558,
570
],
[
1248,
1260
],
[
3117,
3129
]
],
[
[
612,
624
],
[
689,
701
],
[
2795,
2807
]
],
[
[
665,
681
],
[
1361,
1377
],
[
1605,
1621
],
[
1623,
1639
],
[
1995,
2011
],
[
2013,
2029
],
[
2413,
2429
],
[
2431,
2447
],
[
2809,
2825
]
],
[
[
924,
940
],
[
1002,
1018
],
[
1393,
1409
],
[
1655,
1671
],
[
2045,
2061
],
[
2463,
2479
],
[
2841,
2857
]
],
[
[
974,
990
],
[
1393,
1409
],
[
1655,
1671
],
[
2045,
2061
],
[
2463,
2479
],
[
2841,
2857
]
],
[
[
1041,
1053
],
[
1496,
1508
],
[
1758,
1770
],
[
1821,
1833
],
[
1885,
1897
],
[
2155,
2167
],
[
2218,
2230
],
[
2289,
2301
],
[
2577,
2589
],
[
2640,
2652
],
[
2978,
2990
]
],
[
[
1234,
1247
]
],
[
[
3091,
3116
]
]
] |
#!/usr/bin/env python3
import os
import math
from cereal import car, log
from common.numpy_fast import clip, interp
from common.realtime import sec_since_boot, config_realtime_process, Priority, Ratekeeper, DT_CTRL
from common.profiler import Profiler
from common.params import Params, put_nonblocking
import cereal.messaging as messaging
from selfdrive.config import Conversions as CV
from selfdrive.swaglog import cloudlog
from selfdrive.boardd.boardd import can_list_to_can_capnp
from selfdrive.car.car_helpers import get_car, get_startup_event, get_one_can
from selfdrive.controls.lib.lane_planner import CAMERA_OFFSET
from selfdrive.controls.lib.drive_helpers import update_v_cruise, initialize_v_cruise
from selfdrive.controls.lib.longcontrol import LongControl, STARTING_TARGET_SPEED
from selfdrive.controls.lib.latcontrol_pid import LatControlPID
from selfdrive.controls.lib.latcontrol_indi import LatControlINDI
from selfdrive.controls.lib.latcontrol_lqr import LatControlLQR
from selfdrive.controls.lib.latcontrol_angle import LatControlAngle
from selfdrive.controls.lib.events import Events, ET
from selfdrive.controls.lib.alertmanager import AlertManager
from selfdrive.controls.lib.vehicle_model import VehicleModel
from selfdrive.controls.lib.longitudinal_planner import LON_MPC_STEP
from selfdrive.locationd.calibrationd import Calibration
from selfdrive.hardware import HARDWARE, TICI
from selfdrive.car.hyundai.scc_smoother import SccSmoother
from selfdrive.ntune import ntune_get, ntune_isEnabled
LDW_MIN_SPEED = 31 * CV.MPH_TO_MS
LANE_DEPARTURE_THRESHOLD = 0.1
STEER_ANGLE_SATURATION_TIMEOUT = 1.0 / DT_CTRL
STEER_ANGLE_SATURATION_THRESHOLD = 2.5 # Degrees
SIMULATION = "SIMULATION" in os.environ
NOSENSOR = "NOSENSOR" in os.environ
IGNORE_PROCESSES = set(["rtshield", "uploader", "deleter", "loggerd", "logmessaged", "tombstoned", "logcatd", "proclogd", "clocksd", "updated", "timezoned", "manage_athenad"])
ThermalStatus = log.DeviceState.ThermalStatus
State = log.ControlsState.OpenpilotState
PandaType = log.PandaState.PandaType
Desire = log.LateralPlan.Desire
LaneChangeState = log.LateralPlan.LaneChangeState
LaneChangeDirection = log.LateralPlan.LaneChangeDirection
EventName = car.CarEvent.EventName
class Controls:
def __init__(self, sm=None, pm=None, can_sock=None):
config_realtime_process(4 if TICI else 3, Priority.CTRL_HIGH)
# Setup sockets
self.pm = pm
if self.pm is None:
self.pm = messaging.PubMaster(['sendcan', 'controlsState', 'carState',
'carControl', 'carEvents', 'carParams'])
self.camera_packets = ["roadCameraState", "driverCameraState"]
if TICI:
self.camera_packets.append("wideRoadCameraState")
self.sm = sm
if self.sm is None:
ignore = ['driverCameraState', 'managerState'] if SIMULATION else None
self.sm = messaging.SubMaster(['deviceState', 'pandaState', 'modelV2', 'liveCalibration',
'driverMonitoringState', 'longitudinalPlan', 'lateralPlan', 'liveLocationKalman',
'managerState', 'liveParameters', 'radarState'] + self.camera_packets,
ignore_alive=ignore, ignore_avg_freq=['radarState', 'longitudinalPlan'])
self.can_sock = can_sock
if can_sock is None:
can_timeout = None if os.environ.get('NO_CAN_TIMEOUT', False) else 100
self.can_sock = messaging.sub_sock('can', timeout=can_timeout)
if TICI:
self.log_sock = messaging.sub_sock('androidLog')
# wait for one pandaState and one CAN packet
hw_type = messaging.recv_one(self.sm.sock['pandaState']).pandaState.pandaType
has_relay = hw_type in [PandaType.blackPanda, PandaType.uno, PandaType.dos]
print("Waiting for CAN messages...")
get_one_can(self.can_sock)
self.CI, self.CP = get_car(self.can_sock, self.pm.sock['sendcan'], has_relay)
# read params
params = Params()
self.is_metric = params.get_bool("IsMetric")
self.is_ldw_enabled = params.get_bool("IsLdwEnabled")
self.enable_lte_onroad = params.get_bool("EnableLteOnroad")
community_feature_toggle = params.get_bool("CommunityFeaturesToggle")
openpilot_enabled_toggle = params.get_bool("OpenpilotEnabledToggle")
passive = params.get_bool("Passive") or not openpilot_enabled_toggle
# detect sound card presence and ensure successful init
sounds_available = HARDWARE.get_sound_card_online()
car_recognized = self.CP.carName != 'mock'
fuzzy_fingerprint = self.CP.fuzzyFingerprint
# If stock camera is disconnected, we loaded car controls and it's not dashcam mode
controller_available = self.CP.enableCamera and self.CI.CC is not None and not passive and not self.CP.dashcamOnly
community_feature = self.CP.communityFeature or fuzzy_fingerprint
community_feature_disallowed = community_feature and (not community_feature_toggle)
self.read_only = not car_recognized or not controller_available or \
self.CP.dashcamOnly or community_feature_disallowed
if self.read_only:
self.CP.safetyModel = car.CarParams.SafetyModel.noOutput
# Write CarParams for radard
cp_bytes = self.CP.to_bytes()
params.put("CarParams", cp_bytes)
put_nonblocking("CarParamsCache", cp_bytes)
self.CC = car.CarControl.new_message()
self.AM = AlertManager()
self.events = Events()
self.LoC = LongControl(self.CP, self.CI.compute_gb)
self.VM = VehicleModel(self.CP)
if self.CP.steerControlType == car.CarParams.SteerControlType.angle:
self.LaC = LatControlAngle(self.CP)
elif self.CP.lateralTuning.which() == 'pid':
self.LaC = LatControlPID(self.CP)
elif self.CP.lateralTuning.which() == 'indi':
self.LaC = LatControlINDI(self.CP)
elif self.CP.lateralTuning.which() == 'lqr':
self.LaC = LatControlLQR(self.CP)
self.initialized = False
self.state = State.disabled
self.enabled = False
self.active = False
self.can_rcv_error = False
self.soft_disable_timer = 0
self.v_cruise_kph = 255
self.v_cruise_kph_last = 0
self.mismatch_counter = 0
self.can_error_counter = 0
self.last_blinker_frame = 0
self.saturated_count = 0
self.distance_traveled = 0
self.last_functional_fan_frame = 0
self.events_prev = []
self.current_alert_types = [ET.PERMANENT]
self.logged_comm_issue = False
# scc smoother
self.is_cruise_enabled = False
self.cruiseVirtualMaxSpeed = 0
self.clu_speed_ms = 0.
self.apply_accel = 0.
self.fused_accel = 0.
self.lead_drel = 0.
self.aReqValue = 0.
self.aReqValueMin = 0.
self.aReqValueMax = 0.
self.angle_steers_des = 0.
# TODO: no longer necessary, aside from process replay
self.sm['liveParameters'].valid = True
self.startup_event = get_startup_event(car_recognized, controller_available, fuzzy_fingerprint)
if not sounds_available:
self.events.add(EventName.soundsUnavailable, static=True)
if community_feature_disallowed:
self.events.add(EventName.communityFeatureDisallowed, static=True)
if not car_recognized:
self.events.add(EventName.carUnrecognized, static=True)
elif self.read_only:
self.events.add(EventName.dashcamMode, static=True)
# controlsd is driven by can recv, expected at 100Hz
self.rk = Ratekeeper(100, print_delay_threshold=None)
self.prof = Profiler(False) # off by default
def update_events(self, CS):
"""Compute carEvents from carState"""
self.events.clear()
self.events.add_from_msg(CS.events)
self.events.add_from_msg(self.sm['driverMonitoringState'].events)
# Handle startup event
if self.startup_event is not None:
self.events.add(self.startup_event)
self.startup_event = None
# Don't add any more events if not initialized
if not self.initialized:
self.events.add(EventName.controlsInitializing)
return
# Create events for battery, temperature, disk space, and memory
if self.sm['deviceState'].batteryPercent < 1 and self.sm['deviceState'].chargingError:
# at zero percent battery, while discharging, OP should not be allowed
self.events.add(EventName.lowBattery)
if self.sm['deviceState'].thermalStatus >= ThermalStatus.red:
self.events.add(EventName.overheat)
if self.sm['deviceState'].freeSpacePercent < 7:
# under 7% of space free no enable allowed
self.events.add(EventName.outOfSpace)
if self.sm['deviceState'].memoryUsagePercent > 90:
self.events.add(EventName.lowMemory)
# Alert if fan isn't spinning for 5 seconds
if self.sm['pandaState'].pandaType in [PandaType.uno, PandaType.dos]:
if self.sm['pandaState'].fanSpeedRpm == 0 and self.sm['deviceState'].fanSpeedPercentDesired > 50:
if (self.sm.frame - self.last_functional_fan_frame) * DT_CTRL > 5.0:
self.events.add(EventName.fanMalfunction)
else:
self.last_functional_fan_frame = self.sm.frame
# Handle calibration status
cal_status = self.sm['liveCalibration'].calStatus
if cal_status != Calibration.CALIBRATED:
if cal_status == Calibration.UNCALIBRATED:
self.events.add(EventName.calibrationIncomplete)
else:
self.events.add(EventName.calibrationInvalid)
# Handle lane change
if self.sm['lateralPlan'].laneChangeState == LaneChangeState.preLaneChange:
direction = self.sm['lateralPlan'].laneChangeDirection
if (CS.leftBlindspot and direction == LaneChangeDirection.left) or \
(CS.rightBlindspot and direction == LaneChangeDirection.right):
self.events.add(EventName.laneChangeBlocked)
elif self.sm['lateralPlan'].autoLaneChangeEnabled and self.sm['lateralPlan'].autoLaneChangeTimer > 0:
self.events.add(EventName.autoLaneChange)
else:
if direction == LaneChangeDirection.left:
self.events.add(EventName.preLaneChangeLeft)
else:
self.events.add(EventName.preLaneChangeRight)
elif self.sm['lateralPlan'].laneChangeState in [LaneChangeState.laneChangeStarting,
LaneChangeState.laneChangeFinishing]:
self.events.add(EventName.laneChange)
if self.can_rcv_error or not CS.canValid:
self.events.add(EventName.canError)
safety_mismatch = self.sm['pandaState'].safetyModel != self.CP.safetyModel or self.sm['pandaState'].safetyParam != self.CP.safetyParam
if safety_mismatch or self.mismatch_counter >= 200:
self.events.add(EventName.controlsMismatch)
if not self.sm['liveParameters'].valid:
self.events.add(EventName.vehicleModelInvalid)
if len(self.sm['radarState'].radarErrors):
self.events.add(EventName.radarFault)
elif not self.sm.valid["pandaState"]:
self.events.add(EventName.usbError)
elif not self.sm.all_alive_and_valid():
self.events.add(EventName.commIssue)
if not self.logged_comm_issue:
cloudlog.error(f"commIssue - valid: {self.sm.valid} - alive: {self.sm.alive}")
self.logged_comm_issue = True
else:
self.logged_comm_issue = False
if not self.sm['lateralPlan'].mpcSolutionValid and not (EventName.turningIndicatorOn in self.events.names):
self.events.add(EventName.plannerError)
if not self.sm['liveLocationKalman'].sensorsOK and not NOSENSOR:
if self.sm.frame > 5 / DT_CTRL: # Give locationd some time to receive all the inputs
self.events.add(EventName.sensorDataInvalid)
if not self.sm['liveLocationKalman'].posenetOK:
self.events.add(EventName.posenetInvalid)
if not self.sm['liveLocationKalman'].deviceStable:
self.events.add(EventName.deviceFalling)
if log.PandaState.FaultType.relayMalfunction in self.sm['pandaState'].faults:
self.events.add(EventName.relayMalfunction)
if self.sm['longitudinalPlan'].fcw or (self.enabled and self.sm['modelV2'].meta.hardBrakePredicted):
self.events.add(EventName.fcw)
if TICI and self.enable_lte_onroad:
logs = messaging.drain_sock(self.log_sock, wait_for_one=False)
messages = []
for m in logs:
try:
messages.append(m.androidLog.message)
except UnicodeDecodeError:
pass
for err in ["ERROR_CRC", "ERROR_ECC", "ERROR_STREAM_UNDERFLOW", "APPLY FAILED"]:
for m in messages:
if err not in m:
continue
csid = m.split("CSID:")[-1].split(" ")[0]
evt = {"0": EventName.wideRoadCameraError, "1": EventName.roadCameraError,
"2": EventName.driverCameraError}.get(csid, None)
if evt is not None:
self.events.add(evt)
# TODO: fix simulator
if not SIMULATION:
#if not NOSENSOR:
# if not self.sm['liveLocationKalman'].gpsOK and (self.distance_traveled > 1000) and \
# (not TICI or self.enable_lte_onroad):
# # Not shown in the first 1 km to allow for driving out of the garage. This event shows after 5 minutes
# self.events.add(EventName.noGps)
if not self.sm.all_alive(self.camera_packets):
self.events.add(EventName.cameraMalfunction)
if self.sm['modelV2'].frameDropPerc > 20:
self.events.add(EventName.modeldLagging)
# Check if all manager processes are running
not_running = set(p.name for p in self.sm['managerState'].processes if not p.running)
if self.sm.rcv_frame['managerState'] and (not_running - IGNORE_PROCESSES):
self.events.add(EventName.processNotRunning)
# Only allow engagement with brake pressed when stopped behind another stopped car
#if CS.brakePressed and self.sm['longitudinalPlan'].vTargetFuture >= STARTING_TARGET_SPEED \
#and self.CP.openpilotLongitudinalControl and CS.vEgo < 0.3:
#self.events.add(EventName.noTarget)
def data_sample(self):
"""Receive data from sockets and update carState"""
# Update carState from CAN
can_strs = messaging.drain_sock_raw(self.can_sock, wait_for_one=True)
CS = self.CI.update(self.CC, can_strs)
self.sm.update(0)
all_valid = CS.canValid and self.sm.all_alive_and_valid()
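# Consider controls initialized once every input is alive and valid, or unconditionally after 2 seconds.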
if not self.initialized and (all_valid or self.sm.frame * DT_CTRL > 2.0):
self.initialized = True
Params().put_bool("ControlsReady", True)
# Check for CAN timeout
if not can_strs:
self.can_error_counter += 1
self.can_rcv_error = True
else:
self.can_rcv_error = False
# When the panda and controlsd do not agree on controls_allowed
# we want to disengage openpilot. However, the panda status arrives over a different
# socket than the CAN messages, so one can arrive earlier than the other.
# Therefore we tolerate the mismatch for a number of samples before triggering the disengagement.
if not self.enabled:
self.mismatch_counter = 0
if not self.sm['pandaState'].controlsAllowed and self.enabled:
self.mismatch_counter += 1
self.distance_traveled += CS.vEgo * DT_CTRL
return CS
def state_transition(self, CS):
"""Compute conditional state transitions and execute actions on state transitions"""
self.v_cruise_kph_last = self.v_cruise_kph
# if stock cruise is completely disabled, then we can use our own set speed logic
self.CP.enableCruise = self.CI.CP.enableCruise
#if not self.CP.enableCruise:
# self.v_cruise_kph = update_v_cruise(self.v_cruise_kph, CS.buttonEvents, self.enabled, self.is_metric)
#elif self.CP.enableCruise and CS.cruiseState.enabled:
# self.v_cruise_kph = CS.cruiseState.speed * CV.MS_TO_KPH
SccSmoother.update_cruise_buttons(self, CS, self.CP.openpilotLongitudinalControl)
# decrease the soft disable timer at every step, as it's reset on
# entry into the SOFT_DISABLING state
self.soft_disable_timer = max(0, self.soft_disable_timer - 1)
self.current_alert_types = [ET.PERMANENT]
# ENABLED, PRE ENABLING, SOFT DISABLING
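# User and immediate disable events take priority in any non-disabled state;
# otherwise each state checks its own exit conditions below.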
if self.state != State.disabled:
# user and immediate disable always have priority in a non-disabled state
if self.events.any(ET.USER_DISABLE):
self.state = State.disabled
self.current_alert_types.append(ET.USER_DISABLE)
elif self.events.any(ET.IMMEDIATE_DISABLE):
self.state = State.disabled
self.current_alert_types.append(ET.IMMEDIATE_DISABLE)
else:
# ENABLED
if self.state == State.enabled:
if self.events.any(ET.SOFT_DISABLE):
self.state = State.softDisabling
self.soft_disable_timer = 50 # 0.5s
self.current_alert_types.append(ET.SOFT_DISABLE)
# SOFT DISABLING
elif self.state == State.softDisabling:
if not self.events.any(ET.SOFT_DISABLE):
# no more soft disabling condition, so go back to ENABLED
self.state = State.enabled
elif self.events.any(ET.SOFT_DISABLE) and self.soft_disable_timer > 0:
self.current_alert_types.append(ET.SOFT_DISABLE)
elif self.soft_disable_timer <= 0:
self.state = State.disabled
# PRE ENABLING
elif self.state == State.preEnabled:
if not self.events.any(ET.PRE_ENABLE):
self.state = State.enabled
else:
self.current_alert_types.append(ET.PRE_ENABLE)
# DISABLED
elif self.state == State.disabled:
if self.events.any(ET.ENABLE):
if self.events.any(ET.NO_ENTRY):
self.current_alert_types.append(ET.NO_ENTRY)
else:
if self.events.any(ET.PRE_ENABLE):
self.state = State.preEnabled
else:
self.state = State.enabled
self.current_alert_types.append(ET.ENABLE)
self.v_cruise_kph = initialize_v_cruise(CS.vEgo, CS.buttonEvents, self.v_cruise_kph_last)
# Check if actuators are enabled
self.active = self.state == State.enabled or self.state == State.softDisabling
if self.active:
self.current_alert_types.append(ET.WARNING)
# Check if openpilot is engaged
self.enabled = self.active or self.state == State.preEnabled
def state_control(self, CS):
"""Given the state, this function returns an actuators packet"""
# Update VehicleModel
params = self.sm['liveParameters']
x = max(params.stiffnessFactor, 0.1)
#sr = max(params.steerRatio, 0.1)
if ntune_isEnabled('useLiveSteerRatio'):
sr = max(params.steerRatio, 0.1)
else:
sr = max(ntune_get('steerRatio'), 0.1)
self.VM.update_params(x, sr)
lat_plan = self.sm['lateralPlan']
long_plan = self.sm['longitudinalPlan']
actuators = car.CarControl.Actuators.new_message()
if CS.leftBlinker or CS.rightBlinker:
self.last_blinker_frame = self.sm.frame
# State specific actions
if not self.active:
self.LaC.reset()
self.LoC.reset(v_pid=CS.vEgo)
long_plan_age = DT_CTRL * (self.sm.frame - self.sm.rcv_frame['longitudinalPlan'])
# no greater than dt mpc + dt, to prevent extrapolating too far ahead
dt = min(long_plan_age, LON_MPC_STEP + DT_CTRL) + DT_CTRL
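# Linearly interpolate acceleration across dt, then integrate it (trapezoidal rule) to get the speed setpoint.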
a_acc_sol = long_plan.aStart + (dt / LON_MPC_STEP) * (long_plan.aTarget - long_plan.aStart)
v_acc_sol = long_plan.vStart + dt * (a_acc_sol + long_plan.aStart) / 2.0
# Gas/Brake PID loop
#actuators.gas, actuators.brake = self.LoC.update(self.active, CS, v_acc_sol, long_plan.vTargetFuture, a_acc_sol, self.CP)
# scc smoother
actuators.gas, actuators.brake = self.LoC.update(self.active and CS.cruiseState.speed > 1.,
CS,
v_acc_sol,
long_plan.vTargetFuture,
a_acc_sol,
self.CP,
self.sm['radarState'])
# Steering PID loop and lateral MPC
actuators.steer, actuators.steeringAngleDeg, lac_log = self.LaC.update(self.active, CS, self.CP, self.VM, params, lat_plan)
# Check for difference between desired angle and angle for angle based control
angle_control_saturated = self.CP.steerControlType == car.CarParams.SteerControlType.angle and \
abs(actuators.steeringAngleDeg - CS.steeringAngleDeg) > STEER_ANGLE_SATURATION_THRESHOLD
if angle_control_saturated and not CS.steeringPressed and self.active:
self.saturated_count += 1
else:
self.saturated_count = 0
# Send a "steering required alert" if saturation count has reached the limit
if (lac_log.saturated and not CS.steeringPressed) or \
(self.saturated_count > STEER_ANGLE_SATURATION_TIMEOUT):
if len(lat_plan.dPathPoints):
# Check if we deviated from the path
left_deviation = actuators.steer > 0 and lat_plan.dPathPoints[0] < -0.1
right_deviation = actuators.steer < 0 and lat_plan.dPathPoints[0] > 0.1
# if left_deviation or right_deviation:
# self.events.add(EventName.steerSaturated)
return actuators, v_acc_sol, a_acc_sol, lac_log
def publish_logs(self, CS, start_time, actuators, v_acc, a_acc, lac_log):
"""Send actuators and hud commands to the car, send controlsstate and MPC logging"""
CC = car.CarControl.new_message()
CC.enabled = self.enabled
CC.actuators = actuators
CC.cruiseControl.override = True
CC.cruiseControl.cancel = self.CP.enableCruise and not self.enabled and CS.cruiseState.enabled
# Some override values for Honda
# brake discount removes a sharp nonlinearity
brake_discount = (1.0 - clip(actuators.brake * 3., 0.0, 1.0))
speed_override = max(0.0, (self.LoC.v_pid + CS.cruiseState.speedOffset) * brake_discount)
CC.cruiseControl.speedOverride = float(speed_override if self.CP.enableCruise else 0.0)
CC.cruiseControl.accelOverride = self.CI.calc_accel_override(CS.aEgo, self.sm['longitudinalPlan'].aTarget, CS.vEgo, self.sm['longitudinalPlan'].vTarget)
CC.hudControl.setSpeed = float(self.v_cruise_kph * CV.KPH_TO_MS)
CC.hudControl.speedVisible = self.enabled
CC.hudControl.lanesVisible = self.enabled
CC.hudControl.leadVisible = self.sm['longitudinalPlan'].hasLead
right_lane_visible = self.sm['lateralPlan'].rProb > 0.5
left_lane_visible = self.sm['lateralPlan'].lProb > 0.5
CC.hudControl.rightLaneVisible = bool(right_lane_visible)
CC.hudControl.leftLaneVisible = bool(left_lane_visible)
recent_blinker = (self.sm.frame - self.last_blinker_frame) * DT_CTRL < 5.0 # 5s blinker cooldown
ldw_allowed = self.is_ldw_enabled and CS.vEgo > LDW_MIN_SPEED and not recent_blinker \
and not self.active and self.sm['liveCalibration'].calStatus == Calibration.CALIBRATED
meta = self.sm['modelV2'].meta
if len(meta.desirePrediction) and ldw_allowed:
l_lane_change_prob = meta.desirePrediction[Desire.laneChangeLeft - 1]
r_lane_change_prob = meta.desirePrediction[Desire.laneChangeRight - 1]
cameraOffset = ntune_get("cameraOffset")
l_lane_close = left_lane_visible and (self.sm['modelV2'].laneLines[1].y[0] > -(1.08 + cameraOffset))
r_lane_close = right_lane_visible and (self.sm['modelV2'].laneLines[2].y[0] < (1.08 - cameraOffset))
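# Flag a departure only when the predicted lane-change probability exceeds the threshold
# and the vehicle is already close to that lane line.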
CC.hudControl.leftLaneDepart = bool(l_lane_change_prob > LANE_DEPARTURE_THRESHOLD and l_lane_close)
CC.hudControl.rightLaneDepart = bool(r_lane_change_prob > LANE_DEPARTURE_THRESHOLD and r_lane_close)
if CC.hudControl.rightLaneDepart or CC.hudControl.leftLaneDepart:
self.events.add(EventName.ldw)
clear_event = ET.WARNING if ET.WARNING not in self.current_alert_types else None
alerts = self.events.create_alerts(self.current_alert_types, [self.CP, self.sm, self.is_metric])
self.AM.add_many(self.sm.frame, alerts, self.enabled)
self.AM.process_alerts(self.sm.frame, clear_event)
CC.hudControl.visualAlert = self.AM.visual_alert
if not self.read_only and self.initialized:
# send car controls over can
can_sends = self.CI.apply(CC, self)
self.pm.send('sendcan', can_list_to_can_capnp(can_sends, msgtype='sendcan', valid=CS.canValid))
force_decel = (self.sm['driverMonitoringState'].awarenessStatus < 0.) or \
(self.state == State.softDisabling)
# Curvature & Steering angle
params = self.sm['liveParameters']
lat_plan = self.sm['lateralPlan']
steer_angle_without_offset = math.radians(CS.steeringAngleDeg - params.angleOffsetAverageDeg)
curvature = -self.VM.calc_curvature(steer_angle_without_offset, CS.vEgo)
self.angle_steers_des = math.degrees(self.VM.get_steer_from_curvature(-lat_plan.curvature, CS.vEgo))
self.angle_steers_des += params.angleOffsetDeg
# controlsState
dat = messaging.new_message('controlsState')
dat.valid = CS.canValid
controlsState = dat.controlsState
controlsState.alertText1 = self.AM.alert_text_1
controlsState.alertText2 = self.AM.alert_text_2
controlsState.alertSize = self.AM.alert_size
controlsState.alertStatus = self.AM.alert_status
controlsState.alertBlinkingRate = self.AM.alert_rate
controlsState.alertType = self.AM.alert_type
controlsState.alertSound = self.AM.audible_alert
controlsState.canMonoTimes = list(CS.canMonoTimes)
controlsState.longitudinalPlanMonoTime = self.sm.logMonoTime['longitudinalPlan']
controlsState.lateralPlanMonoTime = self.sm.logMonoTime['lateralPlan']
controlsState.enabled = self.enabled
controlsState.active = self.active
controlsState.curvature = curvature
controlsState.steeringAngleDesiredDeg = self.angle_steers_des
controlsState.state = self.state
controlsState.engageable = not self.events.any(ET.NO_ENTRY)
controlsState.longControlState = self.LoC.long_control_state
controlsState.vPid = float(self.LoC.v_pid)
controlsState.vCruise = float(self.cruiseVirtualMaxSpeed if self.CP.openpilotLongitudinalControl else self.v_cruise_kph)
controlsState.upAccelCmd = float(self.LoC.pid.p)
controlsState.uiAccelCmd = float(self.LoC.pid.i)
controlsState.ufAccelCmd = float(self.LoC.pid.f)
controlsState.vTargetLead = float(v_acc)
controlsState.aTarget = float(a_acc)
controlsState.cumLagMs = -self.rk.remaining * 1000.
controlsState.startMonoTime = int(start_time * 1e9)
controlsState.forceDecel = bool(force_decel)
controlsState.canErrorCounter = self.can_error_counter
controlsState.angleSteers = steer_angle_without_offset * CV.RAD_TO_DEG
controlsState.cluSpeedMs = self.clu_speed_ms
controlsState.applyAccel = self.apply_accel
controlsState.fusedAccel = self.fused_accel
controlsState.leadDist = self.lead_drel
controlsState.aReqValue = self.aReqValue
controlsState.aReqValueMin = self.aReqValueMin
controlsState.aReqValueMax = self.aReqValueMax
controlsState.steerRatio = self.VM.sR
controlsState.steerRateCost = ntune_get('steerRateCost')
controlsState.steerActuatorDelay = ntune_get('steerActuatorDelay')
if self.CP.steerControlType == car.CarParams.SteerControlType.angle:
controlsState.lateralControlState.angleState = lac_log
elif self.CP.lateralTuning.which() == 'pid':
controlsState.lateralControlState.pidState = lac_log
elif self.CP.lateralTuning.which() == 'lqr':
controlsState.lateralControlState.lqrState = lac_log
elif self.CP.lateralTuning.which() == 'indi':
controlsState.lateralControlState.indiState = lac_log
self.pm.send('controlsState', dat)
# carState
car_events = self.events.to_msg()
cs_send = messaging.new_message('carState')
cs_send.valid = CS.canValid
cs_send.carState = CS
cs_send.carState.events = car_events
self.pm.send('carState', cs_send)
# carEvents - logged every second or on change
if (self.sm.frame % int(1. / DT_CTRL) == 0) or (self.events.names != self.events_prev):
ce_send = messaging.new_message('carEvents', len(self.events))
ce_send.carEvents = car_events
self.pm.send('carEvents', ce_send)
self.events_prev = self.events.names.copy()
# carParams - logged every 50 seconds (> 1 per segment)
if (self.sm.frame % int(50. / DT_CTRL) == 0):
cp_send = messaging.new_message('carParams')
cp_send.carParams = self.CP
self.pm.send('carParams', cp_send)
# carControl
cc_send = messaging.new_message('carControl')
cc_send.valid = CS.canValid
cc_send.carControl = CC
self.pm.send('carControl', cc_send)
# copy CarControl to pass to CarInterface on the next iteration
self.CC = CC
def step(self):
start_time = sec_since_boot()
self.prof.checkpoint("Ratekeeper", ignore=True)
# Sample data from sockets and get a carState
CS = self.data_sample()
self.prof.checkpoint("Sample")
self.update_events(CS)
if not self.read_only and self.initialized:
# Update control state
self.state_transition(CS)
self.prof.checkpoint("State transition")
# Compute actuators (runs PID loops and lateral MPC)
actuators, v_acc, a_acc, lac_log = self.state_control(CS)
self.prof.checkpoint("State Control")
# Publish data
self.publish_logs(CS, start_time, actuators, v_acc, a_acc, lac_log)
self.prof.checkpoint("Sent")
def controlsd_thread(self):
while True:
self.step()
self.rk.monitor_time()
self.prof.display()
def main(sm=None, pm=None, logcan=None):
controls = Controls(sm, pm, logcan)
controls.controlsd_thread()
if __name__ == "__main__":
main()
| [
[
[
30,
32
],
[
1708,
1710
],
[
1744,
1746
],
[
3359,
3361
]
],
[
[
40,
44
],
[
24500,
24504
],
[
24670,
24674
]
],
[
[
64,
67
],
[
2208,
2211
],
[
5124,
5127
],
[
5328,
5331
],
[
5542,
5545
],
[
18648,
18651
],
[
20258,
20261
],
[
21322,
21325
],
[
27126,
27129
]
],
[
[
69,
72
],
[
1948,
1951
],
[
1986,
1989
],
[
2031,
2034
],
[
2065,
2068
],
[
2106,
2109
],
[
2160,
2163
],
[
11746,
11749
]
],
[
[
103,
107
],
[
21663,
21667
]
],
[
[
109,
115
]
],
[
[
144,
158
],
[
28695,
28709
]
],
[
[
160,
183
],
[
2308,
2331
]
],
[
[
185,
193
],
[
2350,
2358
]
],
[
[
195,
205
],
[
7377,
7387
]
],
[
[
207,
214
],
[
1620,
1627
],
[
8889,
8896
],
[
11421,
11428
],
[
14227,
14234
],
[
15012,
15019
],
[
18911,
18918
],
[
19083,
19090
],
[
19094,
19101
],
[
22582,
22589
],
[
27914,
27921
],
[
28263,
28270
]
],
[
[
243,
251
],
[
7437,
7445
]
],
[
[
278,
284
],
[
3945,
3951
],
[
14279,
14285
]
],
[
[
286,
301
],
[
5269,
5284
]
],
[
[
309,
338
],
[
2448,
2457
],
[
2859,
2868
],
[
3430,
3439
],
[
3513,
3522
],
[
3610,
3619
],
[
12067,
12076
],
[
13977,
13986
],
[
24829,
24838
],
[
27658,
27667
],
[
27989,
27998
],
[
28295,
28304
],
[
28437,
28446
]
],
[
[
368,
385
],
[
1537,
1539
],
[
22100,
22102
],
[
26565,
26567
]
],
[
[
416,
424
],
[
11000,
11008
]
],
[
[
461,
482
],
[
24149,
24170
]
],
[
[
521,
528
],
[
3854,
3861
]
],
[
[
530,
547
],
[
6854,
6871
]
],
[
[
549,
560
],
[
3803,
3814
]
],
[
[
609,
622
]
],
[
[
672,
687
]
],
[
[
689,
708
],
[
17765,
17784
]
],
[
[
756,
767
],
[
5429,
5440
]
],
[
[
769,
790
]
],
[
[
841,
854
],
[
5688,
5701
]
],
[
[
906,
920
],
[
5778,
5792
]
],
[
[
971,
984
],
[
5868,
5881
]
],
[
[
1037,
1052
],
[
5597,
5612
]
],
[
[
1095,
1101
],
[
5404,
5410
]
],
[
[
1103,
1105
],
[
6374,
6376
],
[
15907,
15909
],
[
16108,
16110
],
[
16202,
16204
],
[
16247,
16249
],
[
16346,
16348
],
[
16468,
16470
],
[
16625,
16627
],
[
16749,
16751
],
[
16908,
16910
],
[
17002,
17004
],
[
17207,
17209
],
[
17322,
17324
],
[
17417,
17419
],
[
17456,
17458
],
[
17512,
17514
],
[
17569,
17571
],
[
17724,
17726
],
[
18014,
18016
],
[
23673,
23675
],
[
23659,
23661
],
[
25788,
25790
]
],
[
[
1154,
1166
],
[
5371,
5383
]
],
[
[
1216,
1228
],
[
5484,
5496
]
],
[
[
1285,
1297
],
[
19068,
19080
],
[
19144,
19156
]
],
[
[
1343,
1354
],
[
9131,
9142
],
[
9178,
9189
],
[
22794,
22805
]
],
[
[
1386,
1394
],
[
4429,
4437
]
],
[
[
1396,
1400
],
[
2337,
2341
],
[
2662,
2666
],
[
3485,
3489
],
[
12021,
12025
]
],
[
[
1448,
1459
],
[
15616,
15627
]
],
[
[
1488,
1497
],
[
18484,
18493
],
[
23078,
23087
],
[
26992,
27001
],
[
27058,
27067
]
],
[
[
1499,
1514
],
[
18382,
18397
]
],
[
[
1516,
1529
],
[
22671,
22684
]
],
[
[
1550,
1574
],
[
23382,
23406
],
[
23489,
23513
]
],
[
[
1581,
1611
],
[
20717,
20747
]
],
[
[
1628,
1660
],
[
20363,
20395
]
],
[
[
1679,
1689
],
[
2822,
2832
],
[
12744,
12754
]
],
[
[
1719,
1727
],
[
11382,
11390
]
],
[
[
1755,
1771
],
[
13481,
13497
]
],
[
[
1932,
1945
],
[
8295,
8308
]
],
[
[
1978,
1983
],
[
5938,
5943
],
[
15987,
15992
],
[
16147,
16152
],
[
16291,
16296
],
[
16424,
16429
],
[
16511,
16516
],
[
16695,
16700
],
[
16862,
16867
],
[
17090,
17095
],
[
17156,
17161
],
[
17248,
17253
],
[
17376,
17381
],
[
17610,
17615
],
[
17668,
17673
],
[
17905,
17910
],
[
17936,
17941
],
[
18111,
18116
],
[
24334,
24339
]
],
[
[
2019,
2028
],
[
3706,
3715
],
[
3728,
3737
],
[
3743,
3752
],
[
8692,
8701
],
[
8707,
8716
]
],
[
[
2056,
2062
],
[
22953,
22959
],
[
23029,
23035
]
],
[
[
2088,
2103
],
[
9402,
9417
],
[
10092,
10107
],
[
10177,
10192
]
],
[
[
2138,
2157
],
[
9538,
9557
],
[
9614,
9633
],
[
9889,
9908
]
],
[
[
2196,
2205
],
[
6981,
6990
],
[
7082,
7091
],
[
7182,
7191
],
[
7269,
7278
],
[
7924,
7933
],
[
8226,
8235
],
[
8336,
8345
],
[
8479,
8488
],
[
8579,
8588
],
[
8930,
8939
],
[
9228,
9237
],
[
9297,
9306
],
[
9666,
9675
],
[
9827,
9836
],
[
9941,
9950
],
[
10010,
10019
],
[
10237,
10246
],
[
10328,
10337
],
[
10566,
10575
],
[
10661,
10670
],
[
10762,
10771
],
[
10848,
10857
],
[
10934,
10943
],
[
11225,
11234
],
[
11299,
11308
],
[
11508,
11517
],
[
11611,
11620
],
[
11714,
11723
],
[
11843,
11852
],
[
11998,
12007
],
[
12513,
12522
],
[
12549,
12558
],
[
12598,
12607
],
[
13149,
13158
],
[
13250,
13259
],
[
13524,
13533
],
[
23625,
23634
]
],
[
[
2239,
2247
],
[
29526,
29534
]
],
[
[
29476,
29480
],
[
29612,
29616
]
]
] |
import cozmo
name = input("What is your name? ")
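# run_program (called at the bottom) connects to a Cozmo robot and invokes cozmo_program with it.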
def cozmo_program(robot: cozmo.robot.Robot):
robot.say_text(
f"Hi! My name is Cozmo. How are you, {name}?").wait_for_completed()
cozmo.run_program(cozmo_program)
| [
[
[
7,
12
],
[
195,
200
],
[
77,
82
]
],
[
[
14,
18
],
[
163,
167
]
],
[
[
56,
69
],
[
213,
226
]
]
] |
# Copyright (c) 2016 Uber Technologies, Inc.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
from __future__ import absolute_import, unicode_literals, print_function
import pytest
from thriftrw._buffer import ReadBuffer
from thriftrw._buffer import WriteBuffer
from thriftrw.errors import EndOfInputError
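# Sanity checks for the buffer primitives: WriteBuffer accumulates written bytes
# (its capacity is an initial size, not a hard limit), while ReadBuffer consumes bytes
# and raises EndOfInputError when the input is exhausted.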
def test_empty_write_buffer():
buff = WriteBuffer(10)
assert buff.length == 0
assert buff.capacity == 10
assert buff.value == b''
def test_empty_read_buffer():
buff = ReadBuffer(b'')
assert buff.take(0) == b''
with pytest.raises(EndOfInputError):
buff.take(1)
def test_simple_write():
buff = WriteBuffer(10)
buff.write_bytes(b'hello ')
buff.write_bytes(b'world')
assert buff.value == b'hello world'
assert buff.length == 11
def test_simple_read():
buff = ReadBuffer(b'abcd')
assert buff.take(1) == b'a'
assert buff.take(2) == b'bc'
with pytest.raises(EndOfInputError):
buff.take(2)
assert buff.take(1) == b'd'
def test_write_clear():
buff = WriteBuffer(10)
buff.write_bytes(b'foo')
buff.clear()
assert buff.value == b''
assert buff.capacity == 10
assert buff.length == 0
| [
[
[
1126,
1141
]
],
[
[
1143,
1159
]
],
[
[
1161,
1175
]
],
[
[
1184,
1190
],
[
1565,
1571
],
[
1938,
1944
]
],
[
[
1221,
1231
],
[
1508,
1518
],
[
1842,
1852
]
],
[
[
1261,
1272
],
[
1361,
1372
],
[
1656,
1667
],
[
2061,
2072
]
],
[
[
1301,
1316
],
[
1579,
1594
],
[
1952,
1967
]
],
[
[
1323,
1346
]
],
[
[
1471,
1493
]
],
[
[
1624,
1641
]
],
[
[
1811,
1827
]
],
[
[
2030,
2046
]
]
] |
from binascii import crc32
from contextlib import contextmanager
from datetime import datetime, timedelta, timezone
from pathlib import Path
from osgeo import gdal
import pytest
import rasterio
from click.testing import CliRunner
from rasterio import DatasetReader
from rasterio.enums import Compression
from rio_cogeo import cogeo
import eodatasets3
from eodatasets3.model import DatasetDoc
from tests import assert_file_structure
from tests.common import assert_same_as_file
from . import assert_image
h5py = pytest.importorskip(
"h5py",
reason="Extra dependencies needed to run wagl package test. "
"Try pip install eodatasets3[wagl]",
)
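# importorskip skips (rather than fails) every test in this module when h5py isn't installed.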
# These test datasets come from running `tests/integration/h5downsample.py` on a real
# wagl output.
WAGL_LANDSAT_OUTPUT: Path = (
Path(__file__).parent
/ "data/wagl-input/LC80920842016180LGN01/LC80920842016180LGN01.wagl.h5"
)
WAGL_SENTINEL_OUTPUT: Path = (
Path(__file__).parent
/ "data/wagl-input/S2A_OPER_MSI_L1C_TL_EPAE_20201031T022859_A027984_T53JQJ_N02.09/"
"S2A_OPER_MSI_L1C_TL_EPAE_20201031T022859_A027984_T53JQJ_N02.09.wagl.h5"
)
# The matching Level1 metadata (produced by landsat_l1_prepare.py)
L1_METADATA_PATH: Path = (
Path(__file__).parent
/ "data/wagl-input/LC08_L1TP_092084_20160628_20170323_01_T1.yaml"
)
S2_L1_METADATA_PATH: Path = (
Path(__file__).parent
/ "data/wagl-input/S2A_MSIL1C_20201031T004711_N0209_R102_T53JQJ_20201031T022859.odc-metadata.yaml"
)
def test_whole_landsat_wagl_package(
l1_ls8_dataset: DatasetDoc, l1_ls8_folder: Path, tmp_path: Path
):
out = tmp_path
from eodatasets3.scripts import packagewagl
# No warnings should be logged during packaging.
# We could tighten this to specific warnings if it proves too noisy, but it's
# useful for catching things like unclosed files.
with expect_no_warnings():
res = CliRunner().invoke(
packagewagl.run,
map(
str,
(WAGL_LANDSAT_OUTPUT, "--level1", L1_METADATA_PATH, "--output", out),
),
catch_exceptions=False,
)
# The last line of output ends with the dataset path.
words, reported_metadata = res.output.splitlines()[-1].rsplit(" ", 1)
expected_folder = out / "ga_ls8c_ard_3/092/084/2016/06/28"
assert_file_structure(
expected_folder,
{
"ga_ls8c_ard_3-2-1_092084_2016-06-28_final.odc-metadata.yaml": "",
"ga_ls8c_ard_3-2-1_092084_2016-06-28_final.proc-info.yaml": "",
"ga_ls8c_ard_3-2-1_092084_2016-06-28_final.sha1": "",
"ga_ls8c_nbar_3-2-1_092084_2016-06-28_final_band01.tif": "",
"ga_ls8c_nbar_3-2-1_092084_2016-06-28_final_band02.tif": "",
"ga_ls8c_nbar_3-2-1_092084_2016-06-28_final_band03.tif": "",
"ga_ls8c_nbar_3-2-1_092084_2016-06-28_final_band04.tif": "",
"ga_ls8c_nbar_3-2-1_092084_2016-06-28_final_band05.tif": "",
"ga_ls8c_nbar_3-2-1_092084_2016-06-28_final_band06.tif": "",
"ga_ls8c_nbar_3-2-1_092084_2016-06-28_final_band07.tif": "",
"ga_ls8c_nbar_3-2-1_092084_2016-06-28_final_band08.tif": "",
"ga_ls8c_nbar_3-2-1_092084_2016-06-28_final_thumbnail.jpg": "",
"ga_ls8c_nbart_3-2-1_092084_2016-06-28_final_band01.tif": "",
"ga_ls8c_nbart_3-2-1_092084_2016-06-28_final_band02.tif": "",
"ga_ls8c_nbart_3-2-1_092084_2016-06-28_final_band03.tif": "",
"ga_ls8c_nbart_3-2-1_092084_2016-06-28_final_band04.tif": "",
"ga_ls8c_nbart_3-2-1_092084_2016-06-28_final_band05.tif": "",
"ga_ls8c_nbart_3-2-1_092084_2016-06-28_final_band06.tif": "",
"ga_ls8c_nbart_3-2-1_092084_2016-06-28_final_band07.tif": "",
"ga_ls8c_nbart_3-2-1_092084_2016-06-28_final_band08.tif": "",
"ga_ls8c_nbart_3-2-1_092084_2016-06-28_final_thumbnail.jpg": "",
"ga_ls8c_oa_3-2-1_092084_2016-06-28_final_azimuthal-exiting.tif": "",
"ga_ls8c_oa_3-2-1_092084_2016-06-28_final_azimuthal-incident.tif": "",
"ga_ls8c_oa_3-2-1_092084_2016-06-28_final_combined-terrain-shadow.tif": "",
"ga_ls8c_oa_3-2-1_092084_2016-06-28_final_exiting-angle.tif": "",
"ga_ls8c_oa_3-2-1_092084_2016-06-28_final_fmask.tif": "",
"ga_ls8c_oa_3-2-1_092084_2016-06-28_final_incident-angle.tif": "",
"ga_ls8c_oa_3-2-1_092084_2016-06-28_final_nbar-contiguity.tif": "",
"ga_ls8c_oa_3-2-1_092084_2016-06-28_final_nbart-contiguity.tif": "",
"ga_ls8c_oa_3-2-1_092084_2016-06-28_final_relative-azimuth.tif": "",
"ga_ls8c_oa_3-2-1_092084_2016-06-28_final_relative-slope.tif": "",
"ga_ls8c_oa_3-2-1_092084_2016-06-28_final_satellite-azimuth.tif": "",
"ga_ls8c_oa_3-2-1_092084_2016-06-28_final_satellite-view.tif": "",
"ga_ls8c_oa_3-2-1_092084_2016-06-28_final_solar-azimuth.tif": "",
"ga_ls8c_oa_3-2-1_092084_2016-06-28_final_solar-zenith.tif": "",
"ga_ls8c_oa_3-2-1_092084_2016-06-28_final_time-delta.tif": "",
},
)
[output_metadata] = expected_folder.rglob("*.odc-metadata.yaml")
assert reported_metadata == str(
output_metadata
), "Cli didn't report the expected output path"
# Checksum should include all files other than itself.
[checksum_file] = expected_folder.rglob("*.sha1")
all_output_files = set(
p.relative_to(checksum_file.parent)
for p in expected_folder.rglob("*")
if p != checksum_file
)
files_in_checksum = {
Path(line.split("\t")[1]) for line in checksum_file.read_text().splitlines()
}
assert all_output_files == files_in_checksum
# Verify the computed contiguity looks the same. (metadata fields will depend on it)
[image] = expected_folder.rglob("*_oa_*nbar-contiguity.tif")
assert_image(image, nodata=255, unique_pixel_counts={0: 1978, 1: 4184})
[image] = expected_folder.rglob("*_oa_*nbart-contiguity.tif")
assert_image(image, nodata=255, unique_pixel_counts={0: 1979, 1: 4183})
assert_same_as_file(
{
"$schema": "https://schemas.opendatacube.org/dataset",
# A stable ID is taken from the WAGL doc.
"id": "787eb74c-e7df-43d6-b562-b796137330ae",
"label": "ga_ls8c_ard_3-2-1_092084_2016-06-28_final",
"product": {
"href": "https://collections.dea.ga.gov.au/product/ga_ls8c_ard_3",
"name": "ga_ls8c_ard_3",
},
"crs": "epsg:32655",
"geometry": {
"coordinates": [
[
[386_170.809_107_605_5, -3_787_581.737_315_514_6],
[393_422.698_122_467_44, -3_754_539.332_156_166_4],
[402_370.463_567_812_2, -3_717_207.883_853_628_3],
[405_296.703_429_750_9, -3_713_106.822_612_258_6],
[405_302.307_692_307_7, -3_713_085.0],
[560_999.714_134_832_8, -3_745_790.820_117_99],
[591_203.344_050_317_7, -3_755_934.776_849_929_2],
[593_107.5, -3_756_373.614_649_681_4],
[593_066.089_284_004_1, -3_756_560.384_007_281_6],
[593_115.0, -3_756_576.810_780_758],
[593_115.0, -3_769_934.639_090_926_4],
[555_895.771_981_598_6, -3_924_204.823_795_153],
[554_316.830_569_659_8, -3_931_326.117_549_759],
[553_913.572_308_820_1, -3_932_420.854_216_015],
[550_505.686_408_068, -3_946_546.219_392_854],
[548_673.645_879_151_9, -3_946_645.831_477_726_3],
[548_393.076_923_077, -3_947_407.5],
[543_888.417_289_877_3, -3_946_906.014_911_907],
[535_826.373_854_402_9, -3_947_344.365_997_631_6],
[362_232.941_315_876_84, -3_905_575.014_223_633],
[362_109.819_892_458_1, -3_904_490.351_889_350_5],
[360_592.5, -3_904_126.385_350_318_6],
[361_565.347_585_850_9, -3_899_693.716_286_561_5],
[360_585.0, -3_891_057.151_898_734_3],
[366_618.297_729_428_5, -3_863_717.869_440_751],
[386_170.809_107_605_5, -3_787_581.737_315_514_6],
]
],
"type": "Polygon",
},
"grids": {
"default": {
"shape": [79, 78],
"transform": [
2981.153_846_153_846,
0.0,
360_585.0,
0.0,
-2966.202_531_645_569_7,
-3_713_085.0,
0.0,
0.0,
1.0,
],
},
"panchromatic": {
"shape": [157, 156],
"transform": [
1490.480_769_230_769_3,
0.0,
360_592.5,
0.0,
-1492.452_229_299_363,
-3_713_092.5,
0.0,
0.0,
1.0,
],
},
},
"properties": {
"datetime": datetime(2016, 6, 28, 0, 2, 28, 624_635),
"dea:dataset_maturity": "final",
"dtr:end_datetime": datetime(2016, 6, 28, 0, 2, 43, 114_771),
"dtr:start_datetime": datetime(2016, 6, 28, 0, 2, 14, 25815),
"eo:cloud_cover": 63.069_613_577_531_236,
"eo:gsd": 1490.480_769_230_769_3,
"eo:instrument": "OLI_TIRS",
"eo:platform": "landsat-8",
"eo:sun_azimuth": 33.655_125_34,
"eo:sun_elevation": 23.988_361_72,
"fmask:clear": 32.735_343_657_403_305,
"fmask:cloud": 63.069_613_577_531_236,
"fmask:cloud_shadow": 4.139_470_857_647_722,
"fmask:snow": 0.005_053_323_801_138_007,
"fmask:water": 0.050_518_583_616_596_675,
"gqa:abs_iterative_mean_x": 0.21,
"gqa:abs_iterative_mean_xy": 0.27,
"gqa:abs_iterative_mean_y": 0.18,
"gqa:abs_x": 0.3,
"gqa:abs_xy": 0.39,
"gqa:abs_y": 0.25,
"gqa:cep90": 0.46,
"gqa:iterative_mean_x": -0.17,
"gqa:iterative_mean_xy": 0.21,
"gqa:iterative_mean_y": 0.12,
"gqa:iterative_stddev_x": 0.19,
"gqa:iterative_stddev_xy": 0.25,
"gqa:iterative_stddev_y": 0.17,
"gqa:mean_x": -0.1,
"gqa:mean_xy": 0.14,
"gqa:mean_y": 0.1,
"gqa:stddev_x": 0.35,
"gqa:stddev_xy": 0.45,
"gqa:stddev_y": 0.29,
"landsat:collection_category": "T1",
"landsat:collection_number": 1,
"landsat:landsat_product_id": "LC08_L1TP_092084_20160628_20170323_01_T1",
"landsat:landsat_scene_id": "LC80920842016180LGN01",
"landsat:wrs_path": 92,
"landsat:wrs_row": 84,
"odc:dataset_version": "3.2.1",
"odc:file_format": "GeoTIFF",
"odc:processing_datetime": datetime(2019, 7, 11, 23, 29, 29, 21245),
"odc:producer": "ga.gov.au",
"odc:product_family": "ard",
"odc:region_code": "092084",
},
"measurements": {
"nbar_blue": {
"path": "ga_ls8c_nbar_3-2-1_092084_2016-06-28_final_band02.tif"
},
"nbar_coastal_aerosol": {
"path": "ga_ls8c_nbar_3-2-1_092084_2016-06-28_final_band01.tif"
},
"nbar_green": {
"path": "ga_ls8c_nbar_3-2-1_092084_2016-06-28_final_band03.tif"
},
"nbar_nir": {
"path": "ga_ls8c_nbar_3-2-1_092084_2016-06-28_final_band05.tif"
},
"nbar_panchromatic": {
"grid": "panchromatic",
"path": "ga_ls8c_nbar_3-2-1_092084_2016-06-28_final_band08.tif",
},
"nbar_red": {
"path": "ga_ls8c_nbar_3-2-1_092084_2016-06-28_final_band04.tif"
},
"nbar_swir_1": {
"path": "ga_ls8c_nbar_3-2-1_092084_2016-06-28_final_band06.tif"
},
"nbar_swir_2": {
"path": "ga_ls8c_nbar_3-2-1_092084_2016-06-28_final_band07.tif"
},
"nbart_blue": {
"path": "ga_ls8c_nbart_3-2-1_092084_2016-06-28_final_band02.tif"
},
"nbart_coastal_aerosol": {
"path": "ga_ls8c_nbart_3-2-1_092084_2016-06-28_final_band01.tif"
},
"nbart_green": {
"path": "ga_ls8c_nbart_3-2-1_092084_2016-06-28_final_band03.tif"
},
"nbart_nir": {
"path": "ga_ls8c_nbart_3-2-1_092084_2016-06-28_final_band05.tif"
},
"nbart_panchromatic": {
"grid": "panchromatic",
"path": "ga_ls8c_nbart_3-2-1_092084_2016-06-28_final_band08.tif",
},
"nbart_red": {
"path": "ga_ls8c_nbart_3-2-1_092084_2016-06-28_final_band04.tif"
},
"nbart_swir_1": {
"path": "ga_ls8c_nbart_3-2-1_092084_2016-06-28_final_band06.tif"
},
"nbart_swir_2": {
"path": "ga_ls8c_nbart_3-2-1_092084_2016-06-28_final_band07.tif"
},
"oa_azimuthal_exiting": {
"path": "ga_ls8c_oa_3-2-1_092084_2016-06-28_final_azimuthal-exiting.tif"
},
"oa_azimuthal_incident": {
"path": "ga_ls8c_oa_3-2-1_092084_2016-06-28_final_azimuthal-incident.tif"
},
"oa_combined_terrain_shadow": {
"path": "ga_ls8c_oa_3-2-1_092084_2016-06-28_final_combined-terrain-shadow.tif"
},
"oa_exiting_angle": {
"path": "ga_ls8c_oa_3-2-1_092084_2016-06-28_final_exiting-angle.tif"
},
"oa_fmask": {
"path": "ga_ls8c_oa_3-2-1_092084_2016-06-28_final_fmask.tif"
},
"oa_incident_angle": {
"path": "ga_ls8c_oa_3-2-1_092084_2016-06-28_final_incident-angle.tif"
},
"oa_nbar_contiguity": {
"path": "ga_ls8c_oa_3-2-1_092084_2016-06-28_final_nbar-contiguity.tif"
},
"oa_nbart_contiguity": {
"path": "ga_ls8c_oa_3-2-1_092084_2016-06-28_final_nbart-contiguity.tif"
},
"oa_relative_azimuth": {
"path": "ga_ls8c_oa_3-2-1_092084_2016-06-28_final_relative-azimuth.tif"
},
"oa_relative_slope": {
"path": "ga_ls8c_oa_3-2-1_092084_2016-06-28_final_relative-slope.tif"
},
"oa_satellite_azimuth": {
"path": "ga_ls8c_oa_3-2-1_092084_2016-06-28_final_satellite-azimuth.tif"
},
"oa_satellite_view": {
"path": "ga_ls8c_oa_3-2-1_092084_2016-06-28_final_satellite-view.tif"
},
"oa_solar_azimuth": {
"path": "ga_ls8c_oa_3-2-1_092084_2016-06-28_final_solar-azimuth.tif"
},
"oa_solar_zenith": {
"path": "ga_ls8c_oa_3-2-1_092084_2016-06-28_final_solar-zenith.tif"
},
"oa_time_delta": {
"path": "ga_ls8c_oa_3-2-1_092084_2016-06-28_final_time-delta.tif"
},
},
"accessories": {
"checksum:sha1": {
"path": "ga_ls8c_ard_3-2-1_092084_2016-06-28_final.sha1"
},
"metadata:processor": {
"path": "ga_ls8c_ard_3-2-1_092084_2016-06-28_final.proc-info.yaml"
},
"thumbnail:nbar": {
"path": "ga_ls8c_nbar_3-2-1_092084_2016-06-28_final_thumbnail.jpg"
},
"thumbnail:nbart": {
"path": "ga_ls8c_nbart_3-2-1_092084_2016-06-28_final_thumbnail.jpg"
},
},
"lineage": {"level1": ["fb1c622e-90aa-50e8-9d5e-ad69db82d0f6"]},
},
output_metadata,
)
[proc_info] = expected_folder.rglob("*.proc-info.yaml")
assert_same_as_file(
{
"fmask": {
"parameters": {
"cloud_buffer_distance_metres": 0.0,
"cloud_shadow_buffer_distance_metres": 0.0,
"frantz_parallax_sentinel_2": False,
},
"percent_class_distribution": {
"clear": 32.735_343_657_403_305,
"cloud": 63.069_613_577_531_236,
"cloud_shadow": 4.139_470_857_647_722,
"snow": 0.005_053_323_801_138_007,
"water": 0.050_518_583_616_596_675,
},
},
"software_versions": [
{
"name": "modtran",
"url": "http://www.ontar.com/software/productdetails.aspx?item=modtran",
"version": "6.0.1",
},
{
"name": "wagl",
"url": "https://github.com/GeoscienceAustralia/wagl.git",
"version": "5.3.1+118.g9edd420",
},
{
"name": "eugl",
"url": "https://github.com/OpenDataCubePipelines/eugl.git",
"version": "0.0.2+69.gb1d1231",
},
{"name": "gverify", "url": None, "version": "v0.25c"},
{
"name": "fmask",
"url": "https://bitbucket.org/chchrsc/python-fmask",
"version": "0.5.3",
},
{
"name": "tesp",
"url": "https://github.com/OpenDataCubePipelines/tesp.git",
"version": "0.6.1",
},
{
"name": "eodatasets3",
"url": "https://github.com/GeoscienceAustralia/eo-datasets",
"version": eodatasets3.__version__,
},
],
},
proc_info,
ignore_fields=("gqa", "wagl"),
)
# All produced tifs should be valid COGs
for image in expected_folder.rglob("*.tif"):
assert cogeo.cog_validate(image), f"Failed COG validation: {image}"
# Check one of the images explicitly.
[image] = expected_folder.rglob("*_nbar_*_band08.tif")
with rasterio.open(image) as d:
d: DatasetReader
assert d.count == 1, "Expected one band"
assert d.nodata == -999.0
# Verify the pixel values haven't changed.
assert crc32(d.read(1).tobytes()) == 3_381_159_350
# (Rasterio's checksum is zero on some datasets for some reason? So we use crc above...)
assert d.checksum(1) == 58403
# The last overview is an odd size because of the tiny test data image size.
assert d.overviews(1) == [8, 16, 31]
assert d.driver == "GTiff"
assert d.dtypes == ("int16",)
assert d.compression == Compression.deflate
assert d.height == 157
assert d.width == 156
# The reduced resolution makes it hard to test the chosen block size...
assert d.block_shapes == [(26, 156)]
# Check the overviews use default 512 block size.
# (Rasterio doesn't seem to have an api for this?)
assert gdal.Open(str(image)).GetRasterBand(1).GetOverview(1).GetBlockSize() == [
512,
512,
], "Expected overviews to have a larger block size."
# OA data should have no overviews.
[*oa_images] = expected_folder.rglob("*_oa_*.tif")
assert oa_images
for image in oa_images:
# fmask is the only OA that should have overviews according to spec (and Josh).
if "fmask" in image.name:
assert_image(image, overviews=[8, 16, 26])
else:
assert_image(image, overviews=[])
# Check we didn't get height/width mixed up again :)
# (The small size of our test data makes this slightly silly, though...)
[thumb_path] = expected_folder.rglob("*_nbar_*.jpg")
assert_image(thumb_path, bands=3, shape=(7, 8))
def test_maturity_calculation():
from eodatasets3 import wagl
# Simplified. Only a few ancillary parts that matter to us.
wagl_doc = {
"ancillary": {
"aerosol": {
"id": ["99d73c48-9985-51d2-9639-d37bcdfe119e"],
"tier": "AATSR_CMP_MONTH",
"value": 0.047_813_605_517_148_97,
},
"brdf": {
"alpha_1": {
"band_1": 0.407_471_513_826_581_4,
"band_2": 0.407_472_440_438_251_7,
"band_3": 0.564_374_828_124_185,
"band_4": 0.452_550_357_394_962_35,
"band_5": 0.720_394_875_348_492_4,
"band_6": 0.475_077_458_430_413_66,
"band_7": 0.549_934_518_094_732,
},
"alpha_2": {
"band_1": 0.177_715_841_252_848_28,
"band_2": 0.177_716_091_422_247_15,
"band_3": 0.136_703_039_045_401_32,
"band_4": 0.167_629_648_004_969_63,
"band_5": 0.090_148_975_875_461_32,
"band_6": 0.121_059_126_731_143_88,
"band_7": 0.181_073_714_539_622_23,
},
"id": [
"2e95bdec-42e4-50a2-9a4c-1ea970e2696d",
"d02e1c58-7379-5c2d-a080-995838550d0d",
],
"tier": "DEFINITIVE",
},
"elevation": {
"id": [
"8ad73086-72cf-561a-aa0f-1e3c64d53384",
"e75ac77d-1ed0-55a5-888b-9ae48080eae9",
]
},
"ozone": {
"id": ["83914de1-c12e-5035-af8d-e2dc1baa54d4"],
"tier": "DEFINITIVE",
"value": 0.295,
},
"water_vapour": {
"id": ["e68035cd-1cd3-57fc-9b0e-2bf710a3df87"],
"tier": "DEFINITIVE",
"value": 0.490_000_009_536_743_16,
},
}
}
# July 2002 is when we consider our BRDF to be good enough: both Aqua
# and Terra satellites were now operational.
acq_before_brdf = datetime(2002, 6, 29, tzinfo=timezone.utc)
acq_after_brdf = datetime(2002, 7, 1, tzinfo=timezone.utc)
proc_after_brdf = acq_after_brdf + timedelta(days=7)
# Normal, final dataset. Processed just outside of NRT window.
assert (
wagl._determine_maturity(
acq_after_brdf, acq_after_brdf + timedelta(hours=49), wagl_doc
)
== "final"
)
# NRT when processed < 48 hours
assert (
wagl._determine_maturity(
acq_after_brdf, acq_after_brdf + timedelta(hours=1), wagl_doc
)
== "nrt"
)
assert (
wagl._determine_maturity(
acq_before_brdf, acq_before_brdf + timedelta(hours=47), wagl_doc
)
== "nrt"
)
# Before 2001: final if water vapour is definitive.
assert (
wagl._determine_maturity(
acq_before_brdf, acq_before_brdf + timedelta(days=3), wagl_doc
)
== "final"
)
# Interim whenever water vapour is fallback.
wagl_doc["ancillary"]["water_vapour"]["tier"] = "FALLBACK_DATASET"
assert (
wagl._determine_maturity(acq_after_brdf, proc_after_brdf, wagl_doc) == "interim"
)
assert (
wagl._determine_maturity(
acq_before_brdf, acq_before_brdf + timedelta(days=3), wagl_doc
)
== "interim"
)
wagl_doc["ancillary"]["water_vapour"]["tier"] = "DEFINITIVE"
# Fallback BRDF (when at least one is fallback)
wagl_doc["ancillary"]["brdf"]["tier"] = "FALLBACK_DEFAULT"
assert (
wagl._determine_maturity(acq_after_brdf, proc_after_brdf, wagl_doc) == "interim"
)
@contextmanager
def expect_no_warnings():
"""Throw an assertion error if any warnings are produced."""
with pytest.warns(None) as warning_record:
yield
# We could tighten this to specific warnings if it proves too noisy, but it's
# useful for catching things like unclosed files.
if warning_record:
messages = "\n".join(f"- {w.message} ({w})\n" for w in warning_record)
raise AssertionError(f"Expected no warnings to be produced, got:\n {messages}")
def test_sentinel_wagl_package(tmp_path: Path):
out = tmp_path
from eodatasets3.scripts import packagewagl
# No warnings should be logged during packaging.
# We could tighten this to specific warnings if it proves too noisy, but it's
# useful for catching things like unclosed files.
with expect_no_warnings():
res = CliRunner().invoke(
packagewagl.run,
map(
str,
(
WAGL_SENTINEL_OUTPUT,
"--level1",
S2_L1_METADATA_PATH,
"--output",
out,
# Our weird scaled test dataset resolution
"--oa-resolution",
998.1818181818181,
),
),
catch_exceptions=False,
)
# The last line of output ends with the dataset path.
words, reported_metadata = res.output.splitlines()[-1].rsplit(" ", 1)
expected_folder = out / "ga_s2am_ard_3/53/JQJ/2020/10/31"
assert_file_structure(
expected_folder,
{
"20201031T022859": {
"ga_s2am_ard_3-2-1_53JQJ_2020-10-31_final.odc-metadata.yaml": "",
"ga_s2am_ard_3-2-1_53JQJ_2020-10-31_final.proc-info.yaml": "",
"ga_s2am_ard_3-2-1_53JQJ_2020-10-31_final.sha1": "",
"ga_s2am_nbar_3-2-1_53JQJ_2020-10-31_final_band01.tif": "",
"ga_s2am_nbar_3-2-1_53JQJ_2020-10-31_final_band02.tif": "",
"ga_s2am_nbar_3-2-1_53JQJ_2020-10-31_final_band03.tif": "",
"ga_s2am_nbar_3-2-1_53JQJ_2020-10-31_final_band04.tif": "",
"ga_s2am_nbar_3-2-1_53JQJ_2020-10-31_final_band05.tif": "",
"ga_s2am_nbar_3-2-1_53JQJ_2020-10-31_final_band06.tif": "",
"ga_s2am_nbar_3-2-1_53JQJ_2020-10-31_final_band07.tif": "",
"ga_s2am_nbar_3-2-1_53JQJ_2020-10-31_final_band08a.tif": "",
"ga_s2am_nbar_3-2-1_53JQJ_2020-10-31_final_band08.tif": "",
"ga_s2am_nbar_3-2-1_53JQJ_2020-10-31_final_band11.tif": "",
"ga_s2am_nbar_3-2-1_53JQJ_2020-10-31_final_band12.tif": "",
"ga_s2am_nbar_3-2-1_53JQJ_2020-10-31_final_thumbnail.jpg": "",
"ga_s2am_nbart_3-2-1_53JQJ_2020-10-31_final_band01.tif": "",
"ga_s2am_nbart_3-2-1_53JQJ_2020-10-31_final_band02.tif": "",
"ga_s2am_nbart_3-2-1_53JQJ_2020-10-31_final_band03.tif": "",
"ga_s2am_nbart_3-2-1_53JQJ_2020-10-31_final_band04.tif": "",
"ga_s2am_nbart_3-2-1_53JQJ_2020-10-31_final_band05.tif": "",
"ga_s2am_nbart_3-2-1_53JQJ_2020-10-31_final_band06.tif": "",
"ga_s2am_nbart_3-2-1_53JQJ_2020-10-31_final_band07.tif": "",
"ga_s2am_nbart_3-2-1_53JQJ_2020-10-31_final_band08a.tif": "",
"ga_s2am_nbart_3-2-1_53JQJ_2020-10-31_final_band08.tif": "",
"ga_s2am_nbart_3-2-1_53JQJ_2020-10-31_final_band11.tif": "",
"ga_s2am_nbart_3-2-1_53JQJ_2020-10-31_final_band12.tif": "",
"ga_s2am_nbart_3-2-1_53JQJ_2020-10-31_final_thumbnail.jpg": "",
"ga_s2am_oa_3-2-1_53JQJ_2020-10-31_final_azimuthal-exiting.tif": "",
"ga_s2am_oa_3-2-1_53JQJ_2020-10-31_final_azimuthal-incident.tif": "",
"ga_s2am_oa_3-2-1_53JQJ_2020-10-31_final_combined-terrain-shadow.tif": "",
"ga_s2am_oa_3-2-1_53JQJ_2020-10-31_final_exiting-angle.tif": "",
"ga_s2am_oa_3-2-1_53JQJ_2020-10-31_final_fmask.tif": "",
"ga_s2am_oa_3-2-1_53JQJ_2020-10-31_final_incident-angle.tif": "",
"ga_s2am_oa_3-2-1_53JQJ_2020-10-31_final_nbar-contiguity.tif": "",
"ga_s2am_oa_3-2-1_53JQJ_2020-10-31_final_nbart-contiguity.tif": "",
"ga_s2am_oa_3-2-1_53JQJ_2020-10-31_final_relative-azimuth.tif": "",
"ga_s2am_oa_3-2-1_53JQJ_2020-10-31_final_relative-slope.tif": "",
"ga_s2am_oa_3-2-1_53JQJ_2020-10-31_final_satellite-azimuth.tif": "",
"ga_s2am_oa_3-2-1_53JQJ_2020-10-31_final_satellite-view.tif": "",
"ga_s2am_oa_3-2-1_53JQJ_2020-10-31_final_solar-azimuth.tif": "",
"ga_s2am_oa_3-2-1_53JQJ_2020-10-31_final_solar-zenith.tif": "",
"ga_s2am_oa_3-2-1_53JQJ_2020-10-31_final_time-delta.tif": "",
}
},
)
[output_metadata] = expected_folder.rglob("*.odc-metadata.yaml")
# Checksum should include all files other than itself.
[checksum_file] = expected_folder.rglob("*.sha1")
all_output_files = set(
p.relative_to(checksum_file.parent)
for p in expected_folder.rglob("*")
if p != checksum_file and not p.is_dir()
)
files_in_checksum = {
Path(line.split("\t")[1]) for line in checksum_file.read_text().splitlines()
}
assert all_output_files == files_in_checksum
# Verify the computed contiguity looks the same. (metadata fields will depend on it)
[image] = expected_folder.rglob("*_oa_*nbar-contiguity.tif")
assert_image(image, nodata=255, unique_pixel_counts={0: 5367, 1: 6733})
[image] = expected_folder.rglob("*_oa_*nbart-contiguity.tif")
assert_image(image, nodata=255, unique_pixel_counts={0: 5367, 1: 6733})
assert_same_as_file(
{
"$schema": "https://schemas.opendatacube.org/dataset",
"id": "14cfa990-7e2f-4f0c-bd5e-b4cb28c27e8d",
"label": "ga_s2am_ard_3-2-1_53JQJ_2020-10-31_final",
"product": {
"name": "ga_s2am_ard_3",
"href": "https://collections.dea.ga.gov.au/product/ga_s2am_ard_3",
},
"crs": "epsg:32753",
"geometry": {
"type": "Polygon",
"coordinates": [
[
[731901.8181818182, 6790240.0],
[728854.7368421053, 6790240.0],
[752174.154338321, 6890002.646902946],
[759379.8080509851, 6900040.0],
[762411.0326110948, 6900040.0],
[763218.8851094716, 6900040.0],
[809760.0, 6900040.0],
[809760.0, 6790240.0],
[732900.0, 6790240.0],
[731901.8181818182, 6790240.0],
]
],
},
"grids": {
"default": {
"shape": [110, 110],
"transform": [
998.1818181818181,
0.0,
699960.0,
0.0,
-998.1818181818181,
6900040.0,
0.0,
0.0,
1.0,
],
},
"a": {
"shape": [55, 55],
"transform": [
1996.3636363636363,
0.0,
699960.0,
0.0,
-1996.3636363636363,
6900040.0,
0.0,
0.0,
1.0,
],
},
"b": {
"shape": [19, 19],
"transform": [
5778.9473684210525,
0.0,
699960.0,
0.0,
-5778.9473684210525,
6900040.0,
0.0,
0.0,
1.0,
],
},
"c": {
"shape": [19, 19],
"transform": [
5778.947368421053,
0.0,
699960.0,
0.0,
-5778.947368421053,
6900040.0,
0.0,
0.0,
1.0,
],
},
},
"properties": {
"datetime": "2020-10-31T00:55:10.954414",
"dea:dataset_maturity": "final",
"eo:cloud_cover": 11.063428320692061,
"eo:gsd": 998.1818181818181,
"eo:instrument": "MSI",
"eo:platform": "sentinel-2a",
"eo:sun_azimuth": 62.9424764928076,
"eo:sun_elevation": 26.8398246645449,
"fmask:clear": 73.65382838133374,
"fmask:cloud": 11.063428320692061,
"fmask:cloud_shadow": 0.6983135097842945,
"fmask:snow": 14.583962676987106,
"fmask:water": 0.0004671112027989303,
"gqa:abs_iterative_mean_x": 0.42,
"gqa:abs_iterative_mean_xy": 0.53,
"gqa:abs_iterative_mean_y": 0.32,
"gqa:abs_x": 0.69,
"gqa:abs_xy": 1.07,
"gqa:abs_y": 0.82,
"gqa:cep90": 0.97,
"gqa:iterative_mean_x": 0.4,
"gqa:iterative_mean_xy": 0.4,
"gqa:iterative_mean_y": 0.04,
"gqa:iterative_stddev_x": 0.29,
"gqa:iterative_stddev_xy": 0.53,
"gqa:iterative_stddev_y": 0.44,
"gqa:mean_x": 0.38,
"gqa:mean_xy": 0.39,
"gqa:mean_y": -0.07,
"gqa:stddev_x": 1.18,
"gqa:stddev_xy": 2.24,
"gqa:stddev_y": 1.9,
"odc:dataset_version": "3.2.1",
"odc:file_format": "GeoTIFF",
"odc:processing_datetime": "2021-02-10T03:25:22.635668",
"odc:producer": "ga.gov.au",
"odc:product_family": "ard",
"odc:region_code": "53JQJ",
"sat:orbit_state": "descending",
"sat:relative_orbit": 102,
"sentinel:datastrip_id": "S2A_OPER_MSI_L1C_DS_EPAE_20201031T022859_S20201031T004711_N02.09",
"sentinel:sentinel_tile_id": "S2A_OPER_MSI_L1C_TL_EPAE_20201031T022859_A027984_T53JQJ_N02.09",
"sentinel:datatake_start_datetime": "2020-10-31T02:28:59",
},
"measurements": {
"nbar_blue": {
"path": "ga_s2am_nbar_3-2-1_53JQJ_2020-10-31_final_band02.tif"
},
"nbar_coastal_aerosol": {
"path": "ga_s2am_nbar_3-2-1_53JQJ_2020-10-31_final_band01.tif",
"grid": "b",
},
"nbar_green": {
"path": "ga_s2am_nbar_3-2-1_53JQJ_2020-10-31_final_band03.tif"
},
"nbar_nir_1": {
"path": "ga_s2am_nbar_3-2-1_53JQJ_2020-10-31_final_band08.tif"
},
"nbar_nir_2": {
"path": "ga_s2am_nbar_3-2-1_53JQJ_2020-10-31_final_band08a.tif",
"grid": "a",
},
"nbar_red": {
"path": "ga_s2am_nbar_3-2-1_53JQJ_2020-10-31_final_band04.tif"
},
"nbar_red_edge_1": {
"path": "ga_s2am_nbar_3-2-1_53JQJ_2020-10-31_final_band05.tif",
"grid": "a",
},
"nbar_red_edge_2": {
"path": "ga_s2am_nbar_3-2-1_53JQJ_2020-10-31_final_band06.tif",
"grid": "a",
},
"nbar_red_edge_3": {
"path": "ga_s2am_nbar_3-2-1_53JQJ_2020-10-31_final_band07.tif",
"grid": "a",
},
"nbar_swir_2": {
"path": "ga_s2am_nbar_3-2-1_53JQJ_2020-10-31_final_band11.tif",
"grid": "a",
},
"nbar_swir_3": {
"path": "ga_s2am_nbar_3-2-1_53JQJ_2020-10-31_final_band12.tif",
"grid": "a",
},
"nbart_blue": {
"path": "ga_s2am_nbart_3-2-1_53JQJ_2020-10-31_final_band02.tif"
},
"nbart_coastal_aerosol": {
"path": "ga_s2am_nbart_3-2-1_53JQJ_2020-10-31_final_band01.tif",
"grid": "b",
},
"nbart_green": {
"path": "ga_s2am_nbart_3-2-1_53JQJ_2020-10-31_final_band03.tif"
},
"nbart_nir_1": {
"path": "ga_s2am_nbart_3-2-1_53JQJ_2020-10-31_final_band08.tif"
},
"nbart_nir_2": {
"path": "ga_s2am_nbart_3-2-1_53JQJ_2020-10-31_final_band08a.tif",
"grid": "a",
},
"nbart_red": {
"path": "ga_s2am_nbart_3-2-1_53JQJ_2020-10-31_final_band04.tif"
},
"nbart_red_edge_1": {
"path": "ga_s2am_nbart_3-2-1_53JQJ_2020-10-31_final_band05.tif",
"grid": "a",
},
"nbart_red_edge_2": {
"path": "ga_s2am_nbart_3-2-1_53JQJ_2020-10-31_final_band06.tif",
"grid": "a",
},
"nbart_red_edge_3": {
"path": "ga_s2am_nbart_3-2-1_53JQJ_2020-10-31_final_band07.tif",
"grid": "a",
},
"nbart_swir_2": {
"path": "ga_s2am_nbart_3-2-1_53JQJ_2020-10-31_final_band11.tif",
"grid": "a",
},
"nbart_swir_3": {
"path": "ga_s2am_nbart_3-2-1_53JQJ_2020-10-31_final_band12.tif",
"grid": "a",
},
"oa_azimuthal_exiting": {
"path": "ga_s2am_oa_3-2-1_53JQJ_2020-10-31_final_azimuthal-exiting.tif"
},
"oa_azimuthal_incident": {
"path": "ga_s2am_oa_3-2-1_53JQJ_2020-10-31_final_azimuthal-incident.tif"
},
"oa_combined_terrain_shadow": {
"path": "ga_s2am_oa_3-2-1_53JQJ_2020-10-31_final_combined-terrain-shadow.tif"
},
"oa_exiting_angle": {
"path": "ga_s2am_oa_3-2-1_53JQJ_2020-10-31_final_exiting-angle.tif"
},
"oa_fmask": {
"path": "ga_s2am_oa_3-2-1_53JQJ_2020-10-31_final_fmask.tif",
"grid": "c",
},
"oa_incident_angle": {
"path": "ga_s2am_oa_3-2-1_53JQJ_2020-10-31_final_incident-angle.tif"
},
"oa_nbar_contiguity": {
"path": "ga_s2am_oa_3-2-1_53JQJ_2020-10-31_final_nbar-contiguity.tif"
},
"oa_nbart_contiguity": {
"path": "ga_s2am_oa_3-2-1_53JQJ_2020-10-31_final_nbart-contiguity.tif"
},
"oa_relative_azimuth": {
"path": "ga_s2am_oa_3-2-1_53JQJ_2020-10-31_final_relative-azimuth.tif"
},
"oa_relative_slope": {
"path": "ga_s2am_oa_3-2-1_53JQJ_2020-10-31_final_relative-slope.tif"
},
"oa_satellite_azimuth": {
"path": "ga_s2am_oa_3-2-1_53JQJ_2020-10-31_final_satellite-azimuth.tif"
},
"oa_satellite_view": {
"path": "ga_s2am_oa_3-2-1_53JQJ_2020-10-31_final_satellite-view.tif"
},
"oa_solar_azimuth": {
"path": "ga_s2am_oa_3-2-1_53JQJ_2020-10-31_final_solar-azimuth.tif"
},
"oa_solar_zenith": {
"path": "ga_s2am_oa_3-2-1_53JQJ_2020-10-31_final_solar-zenith.tif"
},
"oa_time_delta": {
"path": "ga_s2am_oa_3-2-1_53JQJ_2020-10-31_final_time-delta.tif"
},
},
"accessories": {
"checksum:sha1": {
"path": "ga_s2am_ard_3-2-1_53JQJ_2020-10-31_final.sha1"
},
"metadata:processor": {
"path": "ga_s2am_ard_3-2-1_53JQJ_2020-10-31_final.proc-info.yaml"
},
"thumbnail:nbar": {
"path": "ga_s2am_nbar_3-2-1_53JQJ_2020-10-31_final_thumbnail.jpg"
},
"thumbnail:nbart": {
"path": "ga_s2am_nbart_3-2-1_53JQJ_2020-10-31_final_thumbnail.jpg"
},
},
"lineage": {"level1": ["e27200c1-0a9c-5e24-bfe1-bbbb3f3bdedc"]},
},
output_metadata,
)
[proc_info] = expected_folder.rglob("*.proc-info.yaml")
assert_same_as_file(
{
"fmask": {
"parameters": {
"cloud_buffer_distance_metres": 0.0,
"cloud_shadow_buffer_distance_metres": 0.0,
"frantz_parallax_sentinel_2": False,
},
"percent_class_distribution": {
"clear": 73.65382838133374,
"cloud": 11.063428320692061,
"cloud_shadow": 0.6983135097842945,
"snow": 14.583962676987106,
"water": 0.0004671112027989303,
},
},
"software_versions": [
{
"name": "modtran",
"url": "http://www.ontar.com/software/productdetails.aspx?item=modtran",
"version": "6.0.1",
},
{
"name": "wagl",
"url": "https://github.com/GeoscienceAustralia/wagl.git",
"version": "5.4.1",
},
{
"name": "eugl",
"url": "https://github.com/OpenDataCubePipelines/eugl.git",
"version": "0.2.1",
},
{"name": "gverify", "url": None, "version": "v0.25c"},
{
"name": "fmask",
"url": "https://bitbucket.org/chchrsc/python-fmask",
"version": "0.5.4",
},
{
"name": "tesp",
"url": "https://github.com/OpenDataCubePipelines/tesp.git",
"version": "0.6.2",
},
{
"name": "eodatasets3",
"url": "https://github.com/GeoscienceAustralia/eo-datasets",
"version": eodatasets3.__version__,
},
],
},
proc_info,
ignore_fields=("gqa", "wagl"),
)
# All produced tifs should be valid COGs
for image in expected_folder.rglob("*.tif"):
assert cogeo.cog_validate(image), f"Failed COG validation: {image}"
| [
[
[
21,
26
],
[
19840,
19845
]
],
[
[
50,
64
],
[
25218,
25232
]
],
[
[
86,
94
],
[
9643,
9651
],
[
9770,
9778
],
[
9850,
9858
],
[
11748,
11756
],
[
23587,
23595
],
[
23652,
23660
]
],
[
[
96,
105
],
[
23733,
23742
],
[
23911,
23920
],
[
24105,
24114
],
[
24261,
24270
],
[
24475,
24484
],
[
24861,
24870
]
],
[
[
107,
115
],
[
23616,
23624
],
[
23680,
23688
]
],
[
[
136,
140
],
[
792,
796
],
[
779,
783
],
[
927,
931
],
[
914,
918
],
[
1216,
1220
],
[
1203,
1207
],
[
1345,
1349
],
[
1332,
1336
],
[
1558,
1562
],
[
1574,
1578
],
[
5625,
5629
],
[
25755,
25759
],
[
30596,
30600
]
],
[
[
160,
164
],
[
20588,
20592
]
],
[
[
172,
178
],
[
514,
520
],
[
25333,
25339
]
],
[
[
186,
194
],
[
19638,
19646
]
],
[
[
221,
230
],
[
1883,
1892
],
[
26070,
26079
]
],
[
[
252,
265
],
[
19676,
19689
]
],
[
[
293,
304
],
[
20255,
20266
]
],
[
[
327,
332
],
[
19466,
19471
],
[
44932,
44937
]
],
[
[
341,
352
],
[
19222,
19233
],
[
44688,
44699
]
],
[
[
383,
393
],
[
1531,
1541
]
],
[
[
412,
433
],
[
2325,
2346
],
[
26775,
26796
]
],
[
[
459,
478
],
[
6136,
6155
],
[
17304,
17323
],
[
31107,
31126
],
[
42818,
42837
]
],
[
[
493,
505
],
[
5916,
5928
],
[
6059,
6071
],
[
21024,
21036
],
[
21093,
21105
],
[
21323,
21335
],
[
30887,
30899
],
[
31030,
31042
]
],
[
[
507,
511
]
],
[
[
758,
777
],
[
779,
783
],
[
1987,
2006
]
],
[
[
892,
912
],
[
914,
918
],
[
26195,
26215
]
],
[
[
1185,
1201
],
[
1203,
1207
],
[
2020,
2036
]
],
[
[
1311,
1330
],
[
1332,
1336
],
[
26269,
26288
]
],
[
[
1478,
1509
]
],
[
[
21377,
21402
]
],
[
[
25237,
25255
],
[
1847,
1865
],
[
26034,
26052
]
],
[
[
25718,
25744
]
]
] |
import scrapy, re
from alleco.objects.official import Official
class ross_t(scrapy.Spider):
name = "ross_t"
muniName = "ROSS"
muniType = "TOWNSHIP"
complete = True
def start_requests(self):
urls = ['https://www.ross.pa.us/245/Board-of-Commissioners',
'https://www.ross.pa.us/225/Other-Elected-Officials']
for url in urls:
yield scrapy.Request(url=url, callback=self.parse)
def parse(self, response):
if response.url[-2]=='r':
for quote in response.xpath('//div[@class="cpTabPanels"]'):
arr = [i.strip() for i in quote.xpath('.//text()').getall() if len(i.strip())>0 and '$' not in i]
temp = []
peeps = []
for i in arr:
temp.append(i)
if '@' in i:
peeps.append(temp)
temp = []
for pers in peeps:
name = self._name(pers[1]) if "Commissioner" in pers[1] else None
yield Official(
muniName=self.muniName,
muniType=self.muniType,
office="COMMISSIONER",
district=pers[0].upper(),
name=name,
email=pers[-1],
vacant=name==None,
url=response.url)
elif response.url[-2]=='l':
for quote in response.xpath('//div[contains(h2/text(),"Ross Tax Collector")]/p[1]'):
yield Official(
muniName=self.muniName,
muniType=self.muniType,
office="TAX COLLECTOR",
name=quote.xpath('text()[1]').get(),
email=quote.xpath('a/@href').get(),
phone=quote.xpath('text()[2]').get(),
url=response.url)
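	# Site-format assumption: entries look like "Commissioner Jane Doe, President";
	# keep the text before the first comma and drop the 13-character "Commissioner " prefix.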
def _name(self,string):
return string.split(",")[0][13:] | [
[
[
7,
13
],
[
80,
86
],
[
357,
363
]
],
[
[
15,
17
]
],
[
[
55,
63
],
[
870,
878
],
[
1228,
1236
]
],
[
[
73,
79
]
]
] |
"""
State pattern
"""
from __future__ import annotations
from abc import ABC, abstractmethod
class Context:
    # Current state that the pattern dispatches on
_state: State = None
def __init__(self, state: State) -> None:
self.transition_to(state)
def transition_to(self, state: State) -> None:
        # Switch to the given state and point it back at this context
self._state = state
self._state.context = self
    # The requests that are ultimately delegated to the current state
def request1(self):
self._state.handle1()
def request2(self):
self._state.handle2()
class State(ABC):
@property
def context(self) -> Context:
return self._context
@context.setter
def context(self, context: Context) -> None:
self._context = context
@abstractmethod
def handle1(self) -> None:
pass
@abstractmethod
def handle2(self) -> None:
pass
class ConcreteStateA(State):
def handle1(self) -> None:
        print('Executed A-1')
self.context.transition_to(ConcreteStateB())
def handle2(self) -> None:
        print('Executed A-2')
class ConcreteStateB(State):
def handle1(self) -> None:
        print('Executed B-1')
def handle2(self) -> None:
        print('Executed B-2')
self.context.transition_to(ConcreteStateA())
if __name__ == '__main__':
context = Context(ConcreteStateA())
context.request1()
context.request2()
context.request2()
| [
[
[
36,
47
]
],
[
[
64,
67
],
[
502,
505
]
],
[
[
69,
83
],
[
693,
707
],
[
758,
772
]
],
[
[
92,
99
],
[
1250,
1257
],
[
547,
554
],
[
637,
644
]
],
[
[
496,
501
],
[
840,
845
],
[
1035,
1040
],
[
175,
180
],
[
261,
266
]
],
[
[
825,
839
],
[
1258,
1272
],
[
1189,
1203
]
],
[
[
1020,
1034
],
[
938,
952
]
],
[
[
1240,
1247
],
[
1280,
1287
],
[
1303,
1310
],
[
1326,
1333
]
]
] |
# -*- coding: utf-8 -*-
# Copyright 2020 Green Valley Belgium NV
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# @@license_version:1.7@@
import json
from mcfw.rpc import arguments, returns
from rogerthat.models import Message
from rogerthat.models.properties.forms import FormResult
from rogerthat.rpc import users
from rogerthat.service.api import messaging
from rogerthat.to.messaging.forms import TextBlockFormTO, TextBlockTO, FormTO
from rogerthat.to.messaging.service_callback_results import FormAcknowledgedCallbackResultTO
from rogerthat.to.service import UserDetailsTO
from rogerthat.utils.app import get_app_user_tuple
from solutions import translate
from solutions.common.dal import get_solution_main_branding, get_solution_settings
from solutions.common.models import SolutionInboxMessage
@arguments(service_user=users.User, service_identity=unicode, message_key=unicode, app_user=users.User, name=unicode,
answer_id=unicode, parent_inbox_message=SolutionInboxMessage)
def process_updated_customer_signup_message(service_user, service_identity, message_key, app_user, name, answer_id,
parent_inbox_message):
# type: (users.User, unicode, unicode, users.User, unicode, unicode, SolutionInboxMessage) -> None
from solutions.common.bizz.messaging import MESSAGE_TAG_DENY_SIGNUP
from solutions.common.restapi.services import rest_create_service_from_signup
with users.set_user(service_user):
sln_settings = get_solution_settings(service_user)
if answer_id == 'decline':
widget = TextBlockTO()
widget.max_chars = 1024
form = TextBlockFormTO()
form.type = TextBlockTO.TYPE
form.widget = widget
form.positive_button = translate(sln_settings.main_language, 'Confirm')
form.negative_button = translate(sln_settings.main_language, 'Cancel')
form.javascript_validation = """function run(result) {
return result.value ? true : '%s';
}""" % translate(sln_settings.main_language, 'this_field_is_required', _duplicate_backslashes=True)
human_user, app_id = get_app_user_tuple(app_user)
messaging.send_form(parent_key=parent_inbox_message.message_key,
parent_message_key=parent_inbox_message.message_key,
message=translate(sln_settings.main_language, 'signup_not_ok'),
member=human_user.email(),
app_id=app_id,
flags=Message.FLAG_AUTO_LOCK,
branding=get_solution_main_branding(service_user).branding_key,
tag=json.dumps({'__rt__.tag': MESSAGE_TAG_DENY_SIGNUP,
'signup_key': parent_inbox_message.category_key}),
form=form,
service_identity=service_identity,
alert_flags=Message.ALERT_FLAG_VIBRATE)
elif answer_id == 'approve':
result = rest_create_service_from_signup(parent_inbox_message.category_key,
force=True) # type: CreateServiceStatusTO
if not result.success:
messaging.send(parent_message_key=message_key,
message=result.errormsg,
answers=[],
flags=Message.FLAG_ALLOW_DISMISS,
branding=get_solution_main_branding(service_user).branding_key,
tag=None,
service_identity=service_identity)
@returns(FormAcknowledgedCallbackResultTO)
@arguments(service_user=users.User, status=int, form_result=FormResult, answer_id=unicode, member=unicode,
message_key=unicode, tag=unicode, received_timestamp=int, acked_timestamp=int, parent_message_key=unicode,
result_key=unicode, service_identity=unicode, user_details=[UserDetailsTO])
def deny_signup(service_user, status, form_result, answer_id, member, message_key, tag,
received_timestamp, acked_timestamp, parent_message_key, result_key,
service_identity, user_details):
from solutions.common.restapi import rest_customer_signup_reply
with users.set_user(service_user):
if answer_id == FormTO.POSITIVE:
tag_dict = json.loads(tag)
rest_customer_signup_reply(tag_dict['signup_key'], form_result.result.value)
| [
[
[
647,
651
],
[
3253,
3257
],
[
5020,
5024
]
],
[
[
674,
683
],
[
1307,
1316
],
[
4316,
4325
]
],
[
[
685,
692
],
[
4273,
4280
]
],
[
[
722,
729
],
[
3097,
3104
],
[
3557,
3564
],
[
4040,
4047
]
],
[
[
776,
786
],
[
4375,
4385
]
],
[
[
813,
818
],
[
1330,
1335
],
[
1398,
1403
],
[
4339,
4344
],
[
1946,
1951
],
[
4926,
4931
]
],
[
[
853,
862
],
[
2707,
2716
],
[
3857,
3866
]
],
[
[
904,
919
],
[
2160,
2175
]
],
[
[
921,
932
],
[
2091,
2102
],
[
2202,
2213
]
],
[
[
934,
940
],
[
4980,
4986
]
],
[
[
1001,
1033
],
[
4281,
4313
]
],
[
[
1067,
1080
],
[
4611,
4624
]
],
[
[
1113,
1131
],
[
2666,
2684
]
],
[
[
1154,
1163
],
[
2287,
2296
],
[
2371,
2380
],
[
2540,
2549
],
[
2897,
2906
]
],
[
[
1197,
1223
],
[
3162,
3188
],
[
4108,
4134
]
],
[
[
1225,
1246
],
[
1999,
2020
]
],
[
[
1283,
1303
],
[
1475,
1495
]
],
[
[
1501,
1540
]
],
[
[
4631,
4642
]
]
] |
import os
import time
import pickle
import math
import numpy as np
import linecache
import matplotlib.pyplot as plt
# from matplotlib.pyplot import MultipleLocator
import grid
data_path = 'E:/dataset/didi/processed'
save_path = 'E:/dataset/didi/processed/order_20161101_sampled_value_map_fig'
data_file_name = 'processed_data' # '.pkl' will be added for binary file
value_map_file_name = 'value_map' # '.pkl' will be added for binary file
n_time_unit = 144
size_hexagon_to_edge = 0.0048
hexagon_size_factor_for_plot = 1
range_map_longitude = [103.96, 104.18]
range_map_latitude = [30.59, 30.77]
size_hexagon = size_hexagon_to_edge * 2 / math.sqrt(3) # length to the point
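# (for a regular hexagon, the circumradius equals the centre-to-edge distance times 2/sqrt(3))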
if not os.path.exists(save_path):
os.mkdir(save_path)
with open(os.path.join(data_path, data_file_name+'.pkl'), 'rb') as f:
data = pickle.load(f)
with open(os.path.join(data_path, value_map_file_name+'.pkl'), 'rb') as f:
value_map = pickle.load(f)
# make hexagon
grid = grid.Hexagon(size_to_edge=size_hexagon_to_edge*hexagon_size_factor_for_plot)
grid_interval_lo = size_hexagon * 1.5
grid_interval_la = size_hexagon_to_edge * 2
grid_centers = []
for la in np.arange(range_map_latitude[1]-size_hexagon, range_map_latitude[0]-0.00001, -grid_interval_la):
row = []
count = 0
for lo in np.arange(range_map_longitude[0], range_map_longitude[1]+0.00001, grid_interval_lo):
if count % 2 == 0:
row.append([lo, la])
else:
row.append([lo, la+size_hexagon_to_edge])
count += 1
grid_centers.append(row)
grid_centers_mat = np.array(grid_centers)
shape_grid_centers_mat = grid_centers_mat.shape
n_grids = shape_grid_centers_mat[0]*shape_grid_centers_mat[1]
grid_index_mat = np.arange(n_grids).reshape(shape_grid_centers_mat[:2])
print('shape of grids is', shape_grid_centers_mat)
print('number of grids is', n_grids)
grid_centers_flat_T = grid_centers_mat.reshape(n_grids, 2).T
max_value = np.max(value_map)
min_value = np.min(value_map)
print('maximum value in value_map is', max_value)
print('minimum value in value_map is', min_value)
# value_map = (value_map - min_value) / max_value
# max_value = np.max(value_map)
# min_value = np.min(value_map)
# print('maximum value in value_map after normalization is', max_value)
# print('minimum value in value_map after normalization is', min_value)
for t in range(n_time_unit):
fig = plt.figure()
plt.title('value map of time unit %d' % t)
plt.scatter(grid_centers_flat_T[0], grid_centers_flat_T[1], c=value_map[t], marker='H', s=100, alpha=0.5)
plt.colorbar()
fig.savefig(os.path.join(save_path, '%d.jpg'%t))
| [
[
[
7,
9
],
[
687,
689
],
[
718,
720
],
[
749,
751
],
[
845,
847
],
[
2605,
2607
]
],
[
[
17,
21
]
],
[
[
29,
35
],
[
820,
826
],
[
926,
932
]
],
[
[
44,
48
],
[
643,
647
]
],
[
[
56,
67
],
[
1154,
1156
],
[
1292,
1294
],
[
1573,
1575
],
[
1724,
1726
],
[
1944,
1946
],
[
1974,
1976
]
],
[
[
75,
84
]
],
[
[
92,
116
],
[
2400,
2403
],
[
2417,
2420
],
[
2464,
2467
],
[
2574,
2577
]
],
[
[
172,
176
],
[
966,
970
]
],
[
[
178,
187
],
[
762,
771
],
[
858,
867
]
],
[
[
218,
227
],
[
702,
711
],
[
727,
736
],
[
2618,
2627
]
],
[
[
295,
309
],
[
773,
787
]
],
[
[
369,
388
],
[
869,
888
]
],
[
[
444,
455
],
[
2376,
2387
]
],
[
[
462,
482
],
[
616,
636
],
[
992,
1012
],
[
1100,
1120
],
[
1482,
1502
]
],
[
[
492,
520
],
[
1013,
1041
]
],
[
[
525,
544
],
[
1302,
1321
],
[
1326,
1345
]
],
[
[
564,
582
],
[
1164,
1182
],
[
1200,
1218
]
],
[
[
601,
613
],
[
1062,
1074
],
[
1186,
1198
]
],
[
[
806,
807
],
[
832,
833
]
],
[
[
813,
817
]
],
[
[
907,
908
],
[
938,
939
]
],
[
[
914,
923
],
[
1951,
1960
],
[
1981,
1990
],
[
2526,
2535
]
],
[
[
959,
963
]
],
[
[
1043,
1059
],
[
1358,
1374
]
],
[
[
1081,
1097
],
[
1232,
1248
]
],
[
[
1126,
1138
],
[
1528,
1540
],
[
1582,
1594
]
],
[
[
1148,
1150
],
[
1432,
1434
],
[
1479,
1481
]
],
[
[
1255,
1258
],
[
1416,
1419
],
[
1463,
1466
],
[
1548,
1551
]
],
[
[
1268,
1273
],
[
1388,
1393
],
[
1513,
1518
]
],
[
[
1286,
1288
],
[
1428,
1430
],
[
1475,
1477
]
],
[
[
1554,
1570
],
[
1621,
1637
],
[
1891,
1907
]
],
[
[
1596,
1618
],
[
1654,
1676
],
[
1680,
1702
],
[
1751,
1773
],
[
1807,
1829
]
],
[
[
1644,
1651
],
[
1734,
1741
],
[
1859,
1866
],
[
1916,
1923
]
],
[
[
1707,
1721
]
],
[
[
1869,
1888
],
[
2476,
2495
],
[
2500,
2519
]
],
[
[
1932,
1941
],
[
2031,
2040
]
],
[
[
1962,
1971
],
[
2081,
2090
]
],
[
[
2365,
2366
],
[
2457,
2458
],
[
2536,
2537
],
[
2638,
2639
]
],
[
[
2394,
2397
],
[
2593,
2596
]
]
] |
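# Read a width w and height h, then print an h-by-w checkerboard of '0'/'1' where
# cells with an even row+column index are '0'.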
w = int(input())
h = int(input())
for i in range(h):
output = str()
for j in range(w):
if (i + j) % 2 == 0:
output += '0'
else:
output += '1'
print(output) | [
[
[
0,
1
],
[
88,
89
]
],
[
[
17,
18
],
[
50,
51
]
],
[
[
39,
40
],
[
100,
101
]
],
[
[
56,
62
],
[
123,
129
],
[
153,
159
],
[
178,
184
]
],
[
[
77,
78
],
[
104,
105
]
]
] |
from bridges.symbol import *
class Text(Symbol):
def __init__(self, label = None):
super(Text, self).__init__()
if label is not None:
self._text = label
else:
self._text = ""
self.stroke_width = 1.0
self._font_size = None
self._anchor_alignment_lr = None
self._anchor_alignment_tb = None
self._locx = 0.0
self._locy = 0.0
def get_shape_type(self):
return "text"
@property
def text(self):
return self._text
@text.setter
def text(self, t):
self._text = t
@property
def font_size(self):
return self._font_size
@font_size.setter
def font_size(self, s):
if(s < 0.0):
raise ValueError("Font size is too small")
self._font_size = s
def set_anchor_alignment(self, typeLR, typeTB):
self._anchor_alignment_lr = typeLR
self._anchor_alignment_tb = typeTB
def set_anchor_location(self, x, y):
self._locx = x
self._locy = y
def get_json_representation(self):
json_builder = super(Text, self).get_json_representation()
json_builder['anchor-location'] = [self._locx, self._locy]
json_builder['text'] = self.text
if self.font_size is not None:
json_builder['font-size'] =self.font_size
if self._anchor_alignment_lr is not None:
json_builder['anchor-alignmentLR'] = self._anchor_alignment_lr
if self._anchor_alignment_tb is not None:
json_builder['anchor-alignmentTB'] = self._anchor_alignment_tb
return json_builder
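
# Minimal usage sketch (assumes the bridges package that provides Symbol is
# importable; the values below are illustrative, not bridges-defined constants):
if __name__ == "__main__":
    caption = Text("hello")
    caption.font_size = 12.0
    caption.set_anchor_location(2.0, 3.0)
    print(caption.get_json_representation())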
| [
[
[
27,
28
],
[
42,
48
]
],
[
[
37,
41
],
[
105,
109
],
[
1129,
1133
]
]
] |
import torch
import torch.nn as nn
import rdkit.Chem as Chem
import torch.nn.functional as F
from hgraph.nnutils import *
from hgraph.encoder import IncHierMPNEncoder
from hgraph.mol_graph import MolGraph
from hgraph.inc_graph import IncTree, IncGraph
class HTuple():
def __init__(self, node=None, mess=None, vmask=None, emask=None):
self.node, self.mess = node, mess
self.vmask, self.emask = vmask, emask
class HierMPNDecoder(nn.Module):
def __init__(self, vocab, avocab, rnn_type, embed_size, hidden_size, latent_size, depthT, depthG, dropout, attention=False):
super(HierMPNDecoder, self).__init__()
self.vocab = vocab
self.avocab = avocab
self.hidden_size = hidden_size
self.embed_size = embed_size
self.latent_size = latent_size
self.use_attention = attention
self.itensor = torch.LongTensor([]).cuda()
self.hmpn = IncHierMPNEncoder(vocab, avocab, rnn_type, embed_size, hidden_size, depthT, depthG, dropout)
self.rnn_cell = self.hmpn.tree_encoder.rnn
self.E_assm = self.hmpn.E_i
self.E_order = torch.eye(MolGraph.MAX_POS).cuda()
self.topoNN = nn.Sequential(
nn.Linear(hidden_size + latent_size, hidden_size),
nn.ReLU(),
nn.Dropout(dropout),
nn.Linear(hidden_size, 1)
)
self.clsNN = nn.Sequential(
nn.Linear(hidden_size + latent_size, hidden_size),
nn.ReLU(),
nn.Dropout(dropout),
nn.Linear(hidden_size, vocab.size()[0])
)
self.iclsNN = nn.Sequential(
nn.Linear(hidden_size + latent_size, hidden_size),
nn.ReLU(),
nn.Dropout(dropout),
nn.Linear(hidden_size, vocab.size()[1])
)
self.matchNN = nn.Sequential(
nn.Linear(hidden_size + embed_size + MolGraph.MAX_POS, hidden_size),
nn.ReLU(),
)
self.W_assm = nn.Linear(hidden_size, latent_size)
if latent_size != hidden_size:
self.W_root = nn.Linear(latent_size, hidden_size)
if self.use_attention:
self.A_topo = nn.Linear(hidden_size, latent_size)
self.A_cls = nn.Linear(hidden_size, latent_size)
self.A_assm = nn.Linear(hidden_size, latent_size)
self.topo_loss = nn.BCEWithLogitsLoss(size_average=False)
self.cls_loss = nn.CrossEntropyLoss(size_average=False)
self.icls_loss = nn.CrossEntropyLoss(size_average=False)
self.assm_loss = nn.CrossEntropyLoss(size_average=False)
def apply_tree_mask(self, tensors, cur, prev):
fnode, fmess, agraph, bgraph, cgraph, scope = tensors
agraph = agraph * index_select_ND(cur.emask, 0, agraph)
bgraph = bgraph * index_select_ND(cur.emask, 0, bgraph)
cgraph = cgraph * index_select_ND(prev.vmask, 0, cgraph)
return fnode, fmess, agraph, bgraph, cgraph, scope
def apply_graph_mask(self, tensors, hgraph):
fnode, fmess, agraph, bgraph, scope = tensors
agraph = agraph * index_select_ND(hgraph.emask, 0, agraph)
bgraph = bgraph * index_select_ND(hgraph.emask, 0, bgraph)
return fnode, fmess, agraph, bgraph, scope
def update_graph_mask(self, graph_batch, new_atoms, hgraph):
new_atom_index = hgraph.vmask.new_tensor(new_atoms)
hgraph.vmask.scatter_(0, new_atom_index, 1)
new_atom_set = set(new_atoms)
new_bonds = [] #new bonds are the subgraph induced by new_atoms
for zid in new_atoms:
for nid in graph_batch[zid]:
if nid not in new_atom_set: continue
new_bonds.append( graph_batch[zid][nid]['mess_idx'] )
new_bond_index = hgraph.emask.new_tensor(new_bonds)
if len(new_bonds) > 0:
hgraph.emask.scatter_(0, new_bond_index, 1)
return new_atom_index, new_bond_index
def init_decoder_state(self, tree_batch, tree_tensors, src_root_vecs):
batch_size = len(src_root_vecs)
num_mess = len(tree_tensors[1])
agraph = tree_tensors[2].clone()
bgraph = tree_tensors[3].clone()
for i,tup in enumerate(tree_tensors[-1]):
root = tup[0]
assert agraph[root,-1].item() == 0
agraph[root,-1] = num_mess + i
for v in tree_batch.successors(root):
mess_idx = tree_batch[root][v]['mess_idx']
assert bgraph[mess_idx,-1].item() == 0
bgraph[mess_idx,-1] = num_mess + i
new_tree_tensors = tree_tensors[:2] + [agraph, bgraph] + tree_tensors[4:]
htree = HTuple()
htree.mess = self.rnn_cell.get_init_state(tree_tensors[1], src_root_vecs)
htree.emask = torch.cat( [bgraph.new_zeros(num_mess), bgraph.new_ones(batch_size)], dim=0 )
return htree, new_tree_tensors
def attention(self, src_vecs, batch_idx, queries, W_att):
size = batch_idx.size()
if batch_idx.dim() > 1:
batch_idx = batch_idx.view(-1)
queries = queries.view(-1, queries.size(-1))
src_vecs = src_vecs.index_select(0, batch_idx)
att_score = torch.bmm( src_vecs, W_att(queries).unsqueeze(-1) )
att_vecs = F.softmax(att_score, dim=1) * src_vecs
att_vecs = att_vecs.sum(dim=1)
return att_vecs if len(size) == 1 else att_vecs.view(size[0], size[1], -1)
def get_topo_score(self, src_tree_vecs, batch_idx, topo_vecs):
if self.use_attention:
topo_cxt = self.attention(src_tree_vecs, batch_idx, topo_vecs, self.A_topo)
else:
topo_cxt = src_tree_vecs.index_select(index=batch_idx, dim=0)
return self.topoNN( torch.cat([topo_vecs, topo_cxt], dim=-1) ).squeeze(-1)
def get_cls_score(self, src_tree_vecs, batch_idx, cls_vecs, cls_labs):
if self.use_attention:
cls_cxt = self.attention(src_tree_vecs, batch_idx, cls_vecs, self.A_cls)
else:
cls_cxt = src_tree_vecs.index_select(index=batch_idx, dim=0)
cls_vecs = torch.cat([cls_vecs, cls_cxt], dim=-1)
cls_scores = self.clsNN(cls_vecs)
if cls_labs is None: #inference mode
icls_scores = self.iclsNN(cls_vecs) #no masking
else:
vocab_masks = self.vocab.get_mask(cls_labs)
icls_scores = self.iclsNN(cls_vecs) + vocab_masks #apply mask by log(x + mask): mask=0 or -INF
return cls_scores, icls_scores
def get_assm_score(self, src_graph_vecs, batch_idx, assm_vecs):
if self.use_attention:
assm_cxt = self.attention(src_graph_vecs, batch_idx, assm_vecs, self.A_assm)
else:
assm_cxt = index_select_ND(src_graph_vecs, 0, batch_idx)
return (self.W_assm(assm_vecs) * assm_cxt).sum(dim=-1)
def forward(self, src_mol_vecs, graphs, tensors, orders):
batch_size = len(orders)
tree_batch, graph_batch = graphs
tree_tensors, graph_tensors = tensors
inter_tensors = tree_tensors
src_root_vecs, src_tree_vecs, src_graph_vecs = src_mol_vecs
init_vecs = src_root_vecs if self.latent_size == self.hidden_size else self.W_root(src_root_vecs)
htree, tree_tensors = self.init_decoder_state(tree_batch, tree_tensors, init_vecs)
hinter = HTuple(
mess = self.rnn_cell.get_init_state(inter_tensors[1]),
emask = self.itensor.new_zeros(inter_tensors[1].size(0))
)
hgraph = HTuple(
mess = self.rnn_cell.get_init_state(graph_tensors[1]),
vmask = self.itensor.new_zeros(graph_tensors[0].size(0)),
emask = self.itensor.new_zeros(graph_tensors[1].size(0))
)
all_topo_preds, all_cls_preds, all_assm_preds = [], [], []
new_atoms = []
tree_scope = tree_tensors[-1]
for i in range(batch_size):
root = tree_batch.nodes[ tree_scope[i][0] ]
clab, ilab = self.vocab[ root['label'] ]
all_cls_preds.append( (init_vecs[i], i, clab, ilab) ) #cluster prediction
new_atoms.extend(root['cluster'])
subgraph = self.update_graph_mask(graph_batch, new_atoms, hgraph)
graph_tensors = self.hmpn.embed_graph(graph_tensors) + (graph_tensors[-1],) #preprocess graph tensors
maxt = max([len(x) for x in orders])
max_cls_size = max( [len(attr) * 2 for node,attr in tree_batch.nodes(data='cluster')] )
for t in range(maxt):
batch_list = [i for i in range(batch_size) if t < len(orders[i])]
assert htree.emask[0].item() == 0 and hinter.emask[0].item() == 0 and hgraph.vmask[0].item() == 0 and hgraph.emask[0].item() == 0
subtree = [], []
for i in batch_list:
xid, yid, tlab = orders[i][t]
subtree[0].append(xid)
if yid is not None:
mess_idx = tree_batch[xid][yid]['mess_idx']
subtree[1].append(mess_idx)
subtree = htree.emask.new_tensor(subtree[0]), htree.emask.new_tensor(subtree[1])
htree.emask.scatter_(0, subtree[1], 1)
hinter.emask.scatter_(0, subtree[1], 1)
cur_tree_tensors = self.apply_tree_mask(tree_tensors, htree, hgraph)
cur_inter_tensors = self.apply_tree_mask(inter_tensors, hinter, hgraph)
cur_graph_tensors = self.apply_graph_mask(graph_tensors, hgraph)
htree, hinter, hgraph = self.hmpn(cur_tree_tensors, cur_inter_tensors, cur_graph_tensors, htree, hinter, hgraph, subtree, subgraph)
new_atoms = []
for i in batch_list:
xid, yid, tlab = orders[i][t]
all_topo_preds.append( (htree.node[xid], i, tlab) ) #topology prediction
if yid is not None:
mess_idx = tree_batch[xid][yid]['mess_idx']
new_atoms.extend( tree_batch.nodes[yid]['cluster'] ) #NOTE: regardless of tlab = 0 or 1
if tlab == 0: continue
cls = tree_batch.nodes[yid]['smiles']
clab, ilab = self.vocab[ tree_batch.nodes[yid]['label'] ]
mess_idx = tree_batch[xid][yid]['mess_idx']
hmess = self.rnn_cell.get_hidden_state(htree.mess)
all_cls_preds.append( (hmess[mess_idx], i, clab, ilab) ) #cluster prediction using message
inter_label = tree_batch.nodes[yid]['inter_label']
inter_label = [ (pos, self.vocab[(cls, icls)][1]) for pos,icls in inter_label ]
inter_size = self.vocab.get_inter_size(ilab)
if len(tree_batch.nodes[xid]['cluster']) > 2: #uncertainty occurs only when previous cluster is a ring
nth_child = tree_batch[yid][xid]['label'] #must be yid -> xid (graph order labeling is different from tree)
cands = tree_batch.nodes[yid]['assm_cands']
icls = list(zip(*inter_label))[1]
cand_vecs = self.enum_attach(hgraph, cands, icls, nth_child)
if len(cand_vecs) < max_cls_size:
pad_len = max_cls_size - len(cand_vecs)
cand_vecs = F.pad(cand_vecs, (0,0,0,pad_len))
batch_idx = hgraph.emask.new_tensor( [i] * max_cls_size )
all_assm_preds.append( (cand_vecs, batch_idx, 0) ) #the label is always the first of assm_cands
subgraph = self.update_graph_mask(graph_batch, new_atoms, hgraph)
topo_vecs, batch_idx, topo_labels = zip_tensors(all_topo_preds)
topo_scores = self.get_topo_score(src_tree_vecs, batch_idx, topo_vecs)
topo_loss = self.topo_loss(topo_scores, topo_labels.float())
topo_acc = get_accuracy_bin(topo_scores, topo_labels)
cls_vecs, batch_idx, cls_labs, icls_labs = zip_tensors(all_cls_preds)
cls_scores, icls_scores = self.get_cls_score(src_tree_vecs, batch_idx, cls_vecs, cls_labs)
cls_loss = self.cls_loss(cls_scores, cls_labs) + self.icls_loss(icls_scores, icls_labs)
cls_acc = get_accuracy(cls_scores, cls_labs)
icls_acc = get_accuracy(icls_scores, icls_labs)
if len(all_assm_preds) > 0:
assm_vecs, batch_idx, assm_labels = zip_tensors(all_assm_preds)
assm_scores = self.get_assm_score(src_graph_vecs, batch_idx, assm_vecs)
assm_loss = self.assm_loss(assm_scores, assm_labels)
assm_acc = get_accuracy_sym(assm_scores, assm_labels)
else:
assm_loss, assm_acc = 0, 1
loss = (topo_loss + cls_loss + assm_loss) / batch_size
return loss, cls_acc, icls_acc, topo_acc, assm_acc
def enum_attach(self, hgraph, cands, icls, nth_child):
cands = self.itensor.new_tensor(cands)
icls_vecs = self.itensor.new_tensor(icls * len(cands))
icls_vecs = self.E_assm( icls_vecs )
nth_child = self.itensor.new_tensor([nth_child] * len(cands.view(-1)))
order_vecs = self.E_order.index_select(0, nth_child)
cand_vecs = hgraph.node.index_select(0, cands.view(-1))
cand_vecs = torch.cat( [cand_vecs, icls_vecs, order_vecs], dim=-1 )
cand_vecs = self.matchNN(cand_vecs)
if len(icls) == 2:
cand_vecs = cand_vecs.view(-1, 2, self.hidden_size).sum(dim=1)
return cand_vecs
def decode(self, src_mol_vecs, greedy=True, max_decode_step=100, beam=5):
src_root_vecs, src_tree_vecs, src_graph_vecs = src_mol_vecs
batch_size = len(src_root_vecs)
tree_batch = IncTree(batch_size, node_fdim=2, edge_fdim=3)
graph_batch = IncGraph(self.avocab, batch_size, node_fdim=self.hmpn.atom_size, edge_fdim=self.hmpn.atom_size + self.hmpn.bond_size)
stack = [[] for i in range(batch_size)]
init_vecs = src_root_vecs if self.latent_size == self.hidden_size else self.W_root(src_root_vecs)
batch_idx = self.itensor.new_tensor(range(batch_size))
cls_scores, icls_scores = self.get_cls_score(src_tree_vecs, batch_idx, init_vecs, None)
root_cls = cls_scores.max(dim=-1)[1]
icls_scores = icls_scores + self.vocab.get_mask(root_cls)
root_cls, root_icls = root_cls.tolist(), icls_scores.max(dim=-1)[1].tolist()
super_root = tree_batch.add_node()
for bid in range(batch_size):
clab, ilab = root_cls[bid], root_icls[bid]
root_idx = tree_batch.add_node( batch_idx.new_tensor([clab, ilab]) )
tree_batch.add_edge(super_root, root_idx)
stack[bid].append(root_idx)
root_smiles = self.vocab.get_ismiles(ilab)
new_atoms, new_bonds, attached = graph_batch.add_mol(bid, root_smiles, [], 0)
tree_batch.register_cgraph(root_idx, new_atoms, new_bonds, attached)
#invariance: tree_tensors is equal to inter_tensors (but inter_tensor's init_vec is 0)
tree_tensors = tree_batch.get_tensors()
graph_tensors = graph_batch.get_tensors()
htree = HTuple( mess = self.rnn_cell.get_init_state(tree_tensors[1]) )
hinter = HTuple( mess = self.rnn_cell.get_init_state(tree_tensors[1]) )
hgraph = HTuple( mess = self.rnn_cell.get_init_state(graph_tensors[1]) )
h = self.rnn_cell.get_hidden_state(htree.mess)
h[1 : batch_size + 1] = init_vecs #wiring root (only for tree, not inter)
for t in range(max_decode_step):
batch_list = [ bid for bid in range(batch_size) if len(stack[bid]) > 0 ]
if len(batch_list) == 0: break
batch_idx = batch_idx.new_tensor(batch_list)
cur_tree_nodes = [stack[bid][-1] for bid in batch_list]
subtree = batch_idx.new_tensor(cur_tree_nodes), batch_idx.new_tensor([])
subgraph = batch_idx.new_tensor( tree_batch.get_cluster_nodes(cur_tree_nodes) ), batch_idx.new_tensor( tree_batch.get_cluster_edges(cur_tree_nodes) )
htree, hinter, hgraph = self.hmpn(tree_tensors, tree_tensors, graph_tensors, htree, hinter, hgraph, subtree, subgraph)
topo_scores = self.get_topo_score(src_tree_vecs, batch_idx, htree.node.index_select(0, subtree[0]))
topo_scores = torch.sigmoid(topo_scores)
if greedy:
topo_preds = topo_scores.tolist()
else:
topo_preds = torch.bernoulli(topo_scores).tolist()
new_mess = []
expand_list = []
for i,bid in enumerate(batch_list):
if topo_preds[i] > 0.5 and tree_batch.can_expand(stack[bid][-1]):
expand_list.append( (len(new_mess), bid) )
new_node = tree_batch.add_node() #new node label is yet to be predicted
edge_feature = batch_idx.new_tensor( [stack[bid][-1], new_node, 0] ) #parent to child is 0
new_edge = tree_batch.add_edge(stack[bid][-1], new_node, edge_feature)
stack[bid].append(new_node)
new_mess.append(new_edge)
else:
child = stack[bid].pop()
if len(stack[bid]) > 0:
nth_child = tree_batch.graph.in_degree(stack[bid][-1]) #edge child -> father has not established
edge_feature = batch_idx.new_tensor( [child, stack[bid][-1], nth_child] )
new_edge = tree_batch.add_edge(child, stack[bid][-1], edge_feature)
new_mess.append(new_edge)
subtree = subtree[0], batch_idx.new_tensor(new_mess)
subgraph = [], []
htree, hinter, hgraph = self.hmpn(tree_tensors, tree_tensors, graph_tensors, htree, hinter, hgraph, subtree, subgraph)
cur_mess = self.rnn_cell.get_hidden_state(htree.mess).index_select(0, subtree[1])
if len(expand_list) > 0:
idx_in_mess, expand_list = zip(*expand_list)
idx_in_mess = batch_idx.new_tensor( idx_in_mess )
expand_idx = batch_idx.new_tensor( expand_list )
forward_mess = cur_mess.index_select(0, idx_in_mess)
cls_scores, icls_scores = self.get_cls_score(src_tree_vecs, expand_idx, forward_mess, None)
scores, cls_topk, icls_topk = hier_topk(cls_scores, icls_scores, self.vocab, beam)
if not greedy:
scores = torch.exp(scores) #score is output of log_softmax
shuf_idx = torch.multinomial(scores, beam, replacement=True).tolist()
for i,bid in enumerate(expand_list):
new_node, fa_node = stack[bid][-1], stack[bid][-2]
success = False
cls_beam = range(beam) if greedy else shuf_idx[i]
for kk in cls_beam: #try until one is chemically valid
if success: break
clab, ilab = cls_topk[i][kk], icls_topk[i][kk]
node_feature = batch_idx.new_tensor( [clab, ilab] )
tree_batch.set_node_feature(new_node, node_feature)
smiles, ismiles = self.vocab.get_smiles(clab), self.vocab.get_ismiles(ilab)
fa_cluster, _, fa_used = tree_batch.get_cluster(fa_node)
inter_cands, anchor_smiles, attach_points = graph_batch.get_assm_cands(fa_cluster, fa_used, ismiles)
if len(inter_cands) == 0:
continue
elif len(inter_cands) == 1:
sorted_cands = [(inter_cands[0], 0)]
nth_child = 0
else:
nth_child = tree_batch.graph.in_degree(fa_node)
icls = [self.vocab[ (smiles,x) ][1] for x in anchor_smiles]
cands = inter_cands if len(attach_points) <= 2 else [ (x[0],x[-1]) for x in inter_cands ]
cand_vecs = self.enum_attach(hgraph, cands, icls, nth_child)
batch_idx = batch_idx.new_tensor( [bid] * len(inter_cands) )
assm_scores = self.get_assm_score(src_graph_vecs, batch_idx, cand_vecs).tolist()
sorted_cands = sorted( list(zip(inter_cands, assm_scores)), key = lambda x:x[1], reverse=True )
for inter_label,_ in sorted_cands:
inter_label = list(zip(inter_label, attach_points))
if graph_batch.try_add_mol(bid, ismiles, inter_label):
new_atoms, new_bonds, attached = graph_batch.add_mol(bid, ismiles, inter_label, nth_child)
tree_batch.register_cgraph(new_node, new_atoms, new_bonds, attached)
tree_batch.update_attached(fa_node, inter_label)
success = True
break
if not success: #force backtrack
child = stack[bid].pop() #pop the dummy new_node which can't be added
nth_child = tree_batch.graph.in_degree(stack[bid][-1])
edge_feature = batch_idx.new_tensor( [child, stack[bid][-1], nth_child] )
new_edge = tree_batch.add_edge(child, stack[bid][-1], edge_feature)
child = stack[bid].pop()
if len(stack[bid]) > 0:
nth_child = tree_batch.graph.in_degree(stack[bid][-1])
edge_feature = batch_idx.new_tensor( [child, stack[bid][-1], nth_child] )
new_edge = tree_batch.add_edge(child, stack[bid][-1], edge_feature)
return graph_batch.get_mol()
| [
[
[
7,
12
],
[
871,
876
],
[
1124,
1129
],
[
4818,
4823
],
[
5239,
5244
],
[
5774,
5779
],
[
6128,
6133
],
[
13215,
13220
],
[
16290,
16295
],
[
16437,
16442
],
[
18472,
18477
],
[
18553,
18558
]
],
[
[
20,
34
],
[
449,
451
],
[
1182,
1184
],
[
1213,
1215
],
[
1280,
1282
],
[
1307,
1309
],
[
1344,
1346
],
[
1401,
1403
],
[
1432,
1434
],
[
1499,
1501
],
[
1526,
1528
],
[
1563,
1565
],
[
1635,
1637
],
[
1666,
1668
],
[
1733,
1735
],
[
1760,
1762
],
[
1797,
1799
],
[
1870,
1872
],
[
1901,
1903
],
[
1986,
1988
],
[
2029,
2031
],
[
2131,
2133
],
[
2225,
2227
],
[
2286,
2288
],
[
2348,
2350
],
[
2410,
2412
],
[
2475,
2477
],
[
2540,
2542
],
[
2605,
2607
]
],
[
[
42,
60
]
],
[
[
68,
92
],
[
5310,
5311
],
[
11288,
11289
]
],
[
[
120,
121
],
[
2793,
2808
],
[
2857,
2872
],
[
2921,
2936
],
[
3149,
3164
],
[
3216,
3231
],
[
6757,
6772
],
[
11641,
11652
],
[
11836,
11852
],
[
11931,
11942
],
[
12171,
12183
],
[
12225,
12237
],
[
12347,
12358
],
[
12547,
12563
],
[
18359,
18368
]
],
[
[
149,
166
],
[
920,
937
]
],
[
[
196,
204
],
[
1134,
1142
],
[
1938,
1946
]
],
[
[
234,
241
],
[
13652,
13659
]
],
[
[
243,
251
],
[
13720,
13728
]
],
[
[
259,
265
],
[
4705,
4711
],
[
7370,
7376
],
[
7541,
7547
],
[
15108,
15114
],
[
15188,
15194
],
[
15268,
15274
]
],
[
[
434,
448
],
[
605,
619
]
]
] |
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
import unittest
import numpy as np
import numpy.testing as npt
import six
from caffe2.python import core, workspace
from ml.rl.caffe_utils import C2
from ml.rl.preprocessing import identify_types, normalization
from ml.rl.preprocessing.identify_types import BOXCOX, CONTINUOUS, ENUM
from ml.rl.preprocessing.normalization import (
NormalizationParameters,
sort_features_by_normalization,
)
from ml.rl.preprocessing.preprocessor_net import PreprocessorNet
from ml.rl.test.preprocessing_util import (
BOXCOX_FEATURE_ID,
ENUM_FEATURE_ID,
PROBABILITY_FEATURE_ID,
id_to_type,
read_data,
)
from ml.rl.test.utils import NumpyFeatureProcessor
from scipy import special
class TestNormalization(unittest.TestCase):
def _feature_type_override(self, feature_id):
"""
This should only be used to test CONTINUOUS_ACTION
"""
if id_to_type(feature_id) == identify_types.CONTINUOUS_ACTION:
return identify_types.CONTINUOUS_ACTION
return None
def test_prepare_normalization_and_normalize(self):
feature_value_map = read_data()
normalization_parameters = {}
for name, values in feature_value_map.items():
normalization_parameters[name] = normalization.identify_parameter(
values, 10, feature_type=self._feature_type_override(name)
)
for k, v in normalization_parameters.items():
if id_to_type(k) == CONTINUOUS:
self.assertEqual(v.feature_type, CONTINUOUS)
self.assertIs(v.boxcox_lambda, None)
self.assertIs(v.boxcox_shift, None)
elif id_to_type(k) == BOXCOX:
self.assertEqual(v.feature_type, BOXCOX)
self.assertIsNot(v.boxcox_lambda, None)
self.assertIsNot(v.boxcox_shift, None)
else:
assert v.feature_type == id_to_type(k)
sorted_features, _ = sort_features_by_normalization(normalization_parameters)
norm_net = core.Net("net")
C2.set_net(norm_net)
preprocessor = PreprocessorNet()
input_matrix = np.zeros([10000, len(sorted_features)], dtype=np.float32)
for i, feature in enumerate(sorted_features):
input_matrix[:, i] = feature_value_map[feature]
input_matrix_blob = "input_matrix_blob"
workspace.FeedBlob(input_matrix_blob, np.array([], dtype=np.float32))
output_blob, _ = preprocessor.normalize_dense_matrix(
input_matrix_blob, sorted_features, normalization_parameters, "", False
)
workspace.FeedBlob(input_matrix_blob, input_matrix)
workspace.RunNetOnce(norm_net)
normalized_feature_matrix = workspace.FetchBlob(output_blob)
normalized_features = {}
on_column = 0
for feature in sorted_features:
norm = normalization_parameters[feature]
if norm.feature_type == ENUM:
column_size = len(norm.possible_values)
else:
column_size = 1
normalized_features[feature] = normalized_feature_matrix[
:, on_column : (on_column + column_size)
]
on_column += column_size
self.assertTrue(
all(
[
np.isfinite(parameter.stddev) and np.isfinite(parameter.mean)
for parameter in normalization_parameters.values()
]
)
)
for k, v in six.iteritems(normalized_features):
self.assertTrue(np.all(np.isfinite(v)))
feature_type = normalization_parameters[k].feature_type
if feature_type == identify_types.PROBABILITY:
sigmoidv = special.expit(v)
self.assertTrue(
np.all(
np.logical_and(np.greater(sigmoidv, 0), np.less(sigmoidv, 1))
)
)
elif feature_type == identify_types.ENUM:
possible_values = normalization_parameters[k].possible_values
self.assertEqual(v.shape[0], len(feature_value_map[k]))
self.assertEqual(v.shape[1], len(possible_values))
possible_value_map = {}
for i, possible_value in enumerate(possible_values):
possible_value_map[possible_value] = i
for i, row in enumerate(v):
original_feature = feature_value_map[k][i]
self.assertEqual(
possible_value_map[original_feature], np.where(row == 1)[0][0]
)
elif feature_type == identify_types.QUANTILE:
for i, feature in enumerate(v[0]):
original_feature = feature_value_map[k][i]
expected = NumpyFeatureProcessor.value_to_quantile(
original_feature, normalization_parameters[k].quantiles
)
self.assertAlmostEqual(feature, expected, 2)
elif feature_type == identify_types.BINARY:
pass
elif (
feature_type == identify_types.CONTINUOUS
or feature_type == identify_types.BOXCOX
):
one_stddev = np.isclose(np.std(v, ddof=1), 1, atol=0.01)
zero_stddev = np.isclose(np.std(v, ddof=1), 0, atol=0.01)
zero_mean = np.isclose(np.mean(v), 0, atol=0.01)
self.assertTrue(
np.all(zero_mean),
"mean of feature {} is {}, not 0".format(k, np.mean(v)),
)
self.assertTrue(np.all(np.logical_or(one_stddev, zero_stddev)))
elif feature_type == identify_types.CONTINUOUS_ACTION:
less_than_max = v < 1
more_than_min = v > -1
self.assertTrue(
np.all(less_than_max),
"values are not less than 1: {}".format(v[less_than_max == False]),
)
self.assertTrue(
np.all(more_than_min),
"values are not more than -1: {}".format(v[more_than_min == False]),
)
else:
raise NotImplementedError()
def test_normalize_dense_matrix_enum(self):
normalization_parameters = {
1: NormalizationParameters(
identify_types.ENUM,
None,
None,
None,
None,
[12, 4, 2],
None,
None,
None,
),
2: NormalizationParameters(
identify_types.CONTINUOUS, None, 0, 0, 1, None, None, None, None
),
3: NormalizationParameters(
identify_types.ENUM, None, None, None, None, [15, 3], None, None, None
),
}
norm_net = core.Net("net")
C2.set_net(norm_net)
preprocessor = PreprocessorNet()
inputs = np.zeros([4, 3], dtype=np.float32)
feature_ids = [2, 1, 3] # Sorted according to feature type
inputs[:, feature_ids.index(1)] = [12, 4, 2, 2]
inputs[:, feature_ids.index(2)] = [1.0, 2.0, 3.0, 3.0]
inputs[:, feature_ids.index(3)] = [15, 3, 15, normalization.MISSING_VALUE]
input_blob = C2.NextBlob("input_blob")
workspace.FeedBlob(input_blob, np.array([0], dtype=np.float32))
normalized_output_blob, _ = preprocessor.normalize_dense_matrix(
input_blob, feature_ids, normalization_parameters, "", False
)
workspace.FeedBlob(input_blob, inputs)
workspace.RunNetOnce(norm_net)
normalized_feature_matrix = workspace.FetchBlob(normalized_output_blob)
np.testing.assert_allclose(
np.array(
[
[1.0, 1, 0, 0, 1, 0],
[2.0, 0, 1, 0, 0, 1],
[3.0, 0, 0, 1, 1, 0],
[3.0, 0, 0, 1, 0, 0], # Missing values should go to all 0
]
),
normalized_feature_matrix,
)
def test_persistency(self):
feature_value_map = read_data()
normalization_parameters = {}
for name, values in feature_value_map.items():
normalization_parameters[name] = normalization.identify_parameter(
values, feature_type=self._feature_type_override(name)
)
s = normalization.serialize(normalization_parameters)
read_parameters = normalization.deserialize(s)
        # Unfortunately, Thrift serialization seems to lose a bit of precision.
# Using `==` will be false.
self.assertEqual(read_parameters.keys(), normalization_parameters.keys())
for k in normalization_parameters:
self.assertEqual(
read_parameters[k].feature_type,
normalization_parameters[k].feature_type,
)
self.assertEqual(
read_parameters[k].possible_values,
normalization_parameters[k].possible_values,
)
for field in [
"boxcox_lambda",
"boxcox_shift",
"mean",
"stddev",
"quantiles",
"min_value",
"max_value",
]:
if getattr(normalization_parameters[k], field) is None:
self.assertEqual(
getattr(read_parameters[k], field),
getattr(normalization_parameters[k], field),
)
else:
npt.assert_allclose(
getattr(read_parameters[k], field),
getattr(normalization_parameters[k], field),
)
def test_preprocessing_network(self):
feature_value_map = read_data()
normalization_parameters = {}
for name, values in feature_value_map.items():
normalization_parameters[name] = normalization.identify_parameter(
values, feature_type=self._feature_type_override(name)
)
test_features = NumpyFeatureProcessor.preprocess(
feature_value_map, normalization_parameters
)
net = core.Net("PreprocessingTestNet")
C2.set_net(net)
preprocessor = PreprocessorNet()
name_preprocessed_blob_map = {}
for feature_name in feature_value_map:
workspace.FeedBlob(str(feature_name), np.array([0], dtype=np.int32))
preprocessed_blob, _ = preprocessor.preprocess_blob(
str(feature_name), [normalization_parameters[feature_name]]
)
name_preprocessed_blob_map[feature_name] = preprocessed_blob
workspace.CreateNet(net)
for feature_name, feature_value in six.iteritems(feature_value_map):
feature_value = np.expand_dims(feature_value, -1)
workspace.FeedBlob(str(feature_name), feature_value)
workspace.RunNetOnce(net)
for feature_name in feature_value_map:
normalized_features = workspace.FetchBlob(
name_preprocessed_blob_map[feature_name]
)
if feature_name != ENUM_FEATURE_ID:
normalized_features = np.squeeze(normalized_features, -1)
tolerance = 0.01
if feature_name == BOXCOX_FEATURE_ID:
# At the limit, boxcox has some numerical instability
tolerance = 0.5
non_matching = np.where(
np.logical_not(
np.isclose(
normalized_features,
test_features[feature_name],
rtol=tolerance,
atol=tolerance,
)
)
)
self.assertTrue(
np.all(
np.isclose(
normalized_features,
test_features[feature_name],
rtol=tolerance,
atol=tolerance,
)
),
"{} does not match: {} {}".format(
feature_name,
normalized_features[non_matching].tolist(),
test_features[feature_name][non_matching].tolist(),
),
)
def test_type_override(self):
# Take a feature that should be identified as probability
feature_value_map = read_data()
probability_values = feature_value_map[PROBABILITY_FEATURE_ID]
# And ask for a binary anyways
parameter = normalization.identify_parameter(
probability_values, feature_type=identify_types.BINARY
)
self.assertEqual(parameter.feature_type, "BINARY")
| [
[
[
103,
111
],
[
812,
820
]
],
[
[
120,
131
],
[
2229,
2231
],
[
2275,
2277
],
[
2495,
2497
],
[
2514,
2516
],
[
3407,
3409
],
[
3441,
3443
],
[
3666,
3668
],
[
3673,
3675
],
[
3914,
3916
],
[
3946,
3948
],
[
3961,
3963
],
[
3986,
3988
],
[
4696,
4698
],
[
5409,
5411
],
[
5420,
5422
],
[
5483,
5485
],
[
5494,
5496
],
[
5555,
5557
],
[
5566,
5568
],
[
5645,
5647
],
[
5728,
5730
],
[
5791,
5793
],
[
5798,
5800
],
[
6036,
6038
],
[
6218,
6220
],
[
7181,
7183
],
[
7204,
7206
],
[
7572,
7574
],
[
7592,
7594
],
[
7936,
7938
],
[
7976,
7978
],
[
10721,
10723
],
[
10741,
10743
],
[
11120,
11122
],
[
11513,
11515
],
[
11758,
11760
],
[
11784,
11786
],
[
11820,
11822
],
[
12109,
12111
],
[
12137,
12139
]
],
[
[
139,
159
],
[
9834,
9837
]
],
[
[
167,
170
],
[
3602,
3605
],
[
11058,
11061
]
],
[
[
197,
201
],
[
2120,
2124
],
[
7077,
7081
],
[
10486,
10490
]
],
[
[
203,
212
],
[
2457,
2466
],
[
2691,
2700
],
[
2751,
2760
],
[
2818,
2827
],
[
7541,
7550
],
[
7769,
7778
],
[
7816,
7825
],
[
7883,
7892
],
[
10683,
10692
],
[
10989,
10998
],
[
11166,
11175
],
[
11227,
11236
],
[
11335,
11344
]
],
[
[
243,
245
],
[
2144,
2146
],
[
7101,
7103
],
[
7507,
7509
],
[
10527,
10529
]
],
[
[
278,
292
],
[
1002,
1016
],
[
1055,
1069
],
[
3789,
3803
],
[
4081,
4095
],
[
4776,
4790
],
[
5187,
5201
],
[
5282,
5296
],
[
5343,
5357
],
[
5872,
5886
],
[
6552,
6566
],
[
6826,
6840
],
[
6962,
6976
],
[
12973,
12987
]
],
[
[
294,
307
],
[
1344,
1357
],
[
7457,
7470
],
[
8502,
8515
],
[
8634,
8647
],
[
8710,
8723
],
[
10228,
10241
],
[
12894,
12907
]
],
[
[
355,
361
],
[
1765,
1771
],
[
1822,
1828
]
],
[
[
363,
373
],
[
1553,
1563
],
[
1614,
1624
]
],
[
[
375,
379
],
[
3036,
3040
]
],
[
[
432,
455
],
[
6511,
6534
],
[
6785,
6808
],
[
6921,
6944
]
],
[
[
461,
491
],
[
2043,
2073
]
],
[
[
544,
559
],
[
2188,
2203
],
[
7145,
7160
],
[
10566,
10581
]
],
[
[
608,
625
],
[
11610,
11627
]
],
[
[
631,
646
],
[
11458,
11473
]
],
[
[
652,
674
],
[
12810,
12832
]
],
[
[
680,
690
],
[
976,
986
],
[
1536,
1546
],
[
1748,
1758
],
[
2000,
2010
]
],
[
[
696,
705
],
[
1193,
1202
],
[
8352,
8361
],
[
10077,
10086
],
[
12751,
12760
]
],
[
[
738,
759
],
[
4946,
4967
],
[
10371,
10392
]
],
[
[
778,
785
],
[
3844,
3851
]
],
[
[
794,
811
]
]
] |
import speech_recognition as sr
r=sr.Recognizer()
with sr.Microphone() as source:
print("Say Something")
    audio = r.listen(source)
print("Time over")
try:
print("Text: "+r.recognize_google(audio))
except:
pass
| [
[
[
7,
31
],
[
34,
36
],
[
55,
57
]
],
[
[
32,
33
],
[
113,
114
],
[
173,
174
]
],
[
[
74,
80
],
[
122,
128
]
],
[
[
107,
112
]
]
] |
import tkinter
from tkinter import *
win = Tk()
sb = Spinbox(win, from_=0, to=10)
sb.pack()
win.mainloop()
| [
[
[
7,
14
]
],
[
[
35,
36
],
[
44,
46
],
[
55,
62
]
],
[
[
38,
41
],
[
63,
66
],
[
95,
98
]
],
[
[
50,
52
],
[
84,
86
]
]
] |
import citysim3d.envs
from visual_dynamics.envs import Env
class ServoingEnv(citysim3d.envs.SimpleQuadPanda3dServoingEnv, Env):
def _get_config(self):
config = super(ServoingEnv, self)._get_config()
config.update({'env': self.env,
'max_time_steps': self.max_time_steps,
'distance_threshold': self.distance_threshold})
return config
# class ServoingEnv(citysim3d.envs.ServoingEnv, Env):
# def _get_config(self):
# config = super(ServoingEnv, self)._get_config()
# config.update({'env': self.env})
# return config
#
#
# class SimpleQuadPanda3dServoingEnv(citysim3d.envs.SimpleQuadPanda3dServoingEnv, ServoingEnv):
# def _get_config(self):
# config = super(SimpleQuadPanda3dServoingEnv, self)._get_config()
# config.update({'env': self.env,
# 'max_time_steps': self.max_time_steps,
# 'distance_threshold': self.distance_threshold})
# return config
| [
[
[
7,
21
],
[
80,
89
]
],
[
[
56,
59
],
[
125,
128
]
],
[
[
68,
79
],
[
181,
192
]
]
] |
from .dynamic_iterbased_runner import DynamicIterBasedRunner
__all__ = ['DynamicIterBasedRunner']
| [
[
[
38,
60
]
],
[
[
62,
69
]
]
] |
#Django Imports
from django.conf import settings
#Python Imports
import requests, os
#Local Imports
from .at_utils import AfricasTalkingException
#Import Afica's Talking Settings
AFRICAS_TALKING_SETTINGS = getattr(settings,'AFRICAS_TALKING',{})
API_KEY = AFRICAS_TALKING_SETTINGS.get('API_KEY',None)
USERNAME = AFRICAS_TALKING_SETTINGS.get('USERNAME',None)
SHORTCODE = AFRICAS_TALKING_SETTINGS.get('SHORTCODE',None)
AFRICAS_TALKING_SEND = AFRICAS_TALKING_SETTINGS.get('SEND',False)
AFRICAS_TALKING_API_BASE = 'http://api.africastalking.com/version1'
HEADERS = {'Accept': 'application/json','apikey':API_KEY}
PARAMS = {'username':USERNAME,'bulkSMSMode':1}
if SHORTCODE:
PARAMS['from'] = SHORTCODE
def send_raw(to,message):
if not AFRICAS_TALKING_SEND:
raise AfricasTalkingException("Africas Talking called when send not set to True")
if API_KEY is None:
raise AfricasTalkingException('AFRICAS_TALKING var has not set API_KEY')
if USERNAME is None:
raise AfricasTalkingException('AFRICAS_TALKING var has not set a USERNAME')
params = {'to':to,'message':message}
params.update(PARAMS)
send_url = os.path.join(AFRICAS_TALKING_API_BASE,'messaging')
post = requests.post(send_url,data=params,headers=HEADERS)
#Raise requests.exceptions.HTTPError if 4XX or 5XX
post.raise_for_status()
return post.json()
def send(to,message):
data = send_raw(to,message)
'''
Example of JSON Response
{u'SMSMessageData':
{u'Message': u'Sent to 1/1 Total Cost: USD 0.0109',
u'Recipients': [{
u'status': u'Success', #u'status': u'Invalid Phone Number',
u'cost': u'KES 1.0000',
u'number': u'+254708054321',
u'messageId': u'ATXid_b50fada5b1af078f2277cacb58ef2447'
}]
}
}
'''
# Return tuple (messageId, messageSuccess, extra_data)
recipients = data['SMSMessageData']['Recipients']
if len(recipients) == 1:
msg_id = recipients[0]['messageId']
msg_success = recipients[0]['status'] == 'Success'
return msg_id, msg_success, {'status':recipients[0]['status']}
def balance():
if API_KEY is None:
raise AfricasTalkingException('AFRICAS_TALKING var has not set API_KEY')
if USERNAME is None:
raise AfricasTalkingException('AFRICAS_TALKING var has not set a USERNAME')
params = {'username':USERNAME}
send_url = os.path.join(AFRICAS_TALKING_API_BASE,'user')
post = requests.get(send_url,params=params,headers=HEADERS)
#Raise requests.exceptions.HTTPError if 4XX or 5XX
post.raise_for_status()
data = post.json()
return data['UserData']['balance']
def fetch(last_received_id=0):
if API_KEY is None:
raise AfricasTalkingException('AFRICAS_TALKING var has not set API_KEY')
if USERNAME is None:
raise AfricasTalkingException('AFRICAS_TALKING var has not set a USERNAME')
params = {'username':USERNAME,'lastReceivedId':last_received_id}
send_url = os.path.join(AFRICAS_TALKING_API_BASE,'messaging')
post = requests.get(send_url,params=params,headers=HEADERS)
return post
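
# Example call pattern (illustrative; assumes Django settings provide an
# AFRICAS_TALKING dict and that AFRICAS_TALKING['SEND'] is True so send() will post):
#   msg_id, success, extra = send('+254700000000', 'Appointment reminder')
#   remaining = balance()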
| [
[
[
40,
48
],
[
217,
225
]
],
[
[
73,
81
],
[
1220,
1228
],
[
2494,
2502
],
[
3089,
3097
]
],
[
[
83,
85
],
[
1158,
1160
],
[
2437,
2439
],
[
3027,
3029
]
],
[
[
124,
147
],
[
784,
807
],
[
898,
921
],
[
1004,
1027
],
[
2209,
2232
],
[
2315,
2338
],
[
2765,
2788
],
[
2871,
2894
]
],
[
[
182,
206
],
[
259,
283
],
[
316,
340
],
[
375,
399
],
[
446,
470
]
],
[
[
249,
256
],
[
608,
615
],
[
867,
874
],
[
2178,
2185
],
[
2734,
2741
]
],
[
[
305,
313
],
[
639,
647
],
[
972,
980
],
[
2283,
2291
],
[
2411,
2419
],
[
2839,
2847
],
[
2967,
2975
]
],
[
[
363,
372
],
[
668,
677
],
[
700,
709
]
],
[
[
423,
443
],
[
748,
768
]
],
[
[
490,
514
],
[
1171,
1195
],
[
2450,
2474
],
[
3040,
3064
]
],
[
[
559,
566
],
[
1263,
1270
],
[
2538,
2545
],
[
3133,
3140
]
],
[
[
618,
624
],
[
683,
689
],
[
1134,
1140
]
],
[
[
715,
723
],
[
1414,
1422
]
],
[
[
1384,
1388
]
],
[
[
2159,
2166
]
],
[
[
2699,
2704
]
]
] |
#! /usr/bin/env python
# encoding: utf-8
# WARNING! Do not edit! https://waf.io/book/index.html#_obtaining_the_waf_file
import os,sys,traceback,base64,signal
try:
import cPickle
except ImportError:
import pickle as cPickle
try:
import subprocess32 as subprocess
except ImportError:
import subprocess
try:
TimeoutExpired=subprocess.TimeoutExpired
except AttributeError:
class TimeoutExpired(Exception):
pass
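# run() reads one base64-encoded, pickled [cmd, kwargs, cargs] triple from stdin,
# executes the command through subprocess.Popen, and writes a base64-encoded pickled
# [returncode, stdout, stderr, exception_name, traceback] reply to stdout.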
def run():
txt=sys.stdin.readline().strip()
if not txt:
sys.exit(1)
[cmd,kwargs,cargs]=cPickle.loads(base64.b64decode(txt))
cargs=cargs or{}
if not'close_fds'in kwargs:
kwargs['close_fds']=False
ret=1
out,err,ex,trace=(None,None,None,None)
try:
proc=subprocess.Popen(cmd,**kwargs)
try:
out,err=proc.communicate(**cargs)
except TimeoutExpired:
if kwargs.get('start_new_session')and hasattr(os,'killpg'):
os.killpg(proc.pid,signal.SIGKILL)
else:
proc.kill()
out,err=proc.communicate()
exc=TimeoutExpired(proc.args,timeout=cargs['timeout'],output=out)
exc.stderr=err
raise exc
ret=proc.returncode
except Exception as e:
exc_type,exc_value,tb=sys.exc_info()
exc_lines=traceback.format_exception(exc_type,exc_value,tb)
trace=str(cmd)+'\n'+''.join(exc_lines)
ex=e.__class__.__name__
tmp=[ret,out,err,ex,trace]
obj=base64.b64encode(cPickle.dumps(tmp))
sys.stdout.write(obj.decode())
sys.stdout.write('\n')
sys.stdout.flush()
while 1:
try:
run()
except KeyboardInterrupt:
break
| [
[
[
128,
130
],
[
829,
831
],
[
847,
849
]
],
[
[
131,
134
],
[
432,
435
],
[
476,
479
],
[
1107,
1110
],
[
1322,
1325
],
[
1354,
1357
],
[
1378,
1381
]
],
[
[
135,
144
],
[
1134,
1143
]
],
[
[
145,
151
],
[
522,
528
],
[
1284,
1290
]
],
[
[
152,
158
],
[
866,
872
]
],
[
[
172,
179
],
[
508,
515
],
[
1301,
1308
]
],
[
[
208,
225
],
[
508,
515
],
[
1301,
1308
]
],
[
[
239,
265
],
[
326,
336
],
[
680,
690
]
],
[
[
294,
304
],
[
326,
336
],
[
680,
690
]
],
[
[
311,
325
],
[
764,
778
],
[
944,
958
]
],
[
[
382,
396
],
[
764,
778
],
[
944,
958
]
],
[
[
420,
423
],
[
1414,
1417
]
]
] |
# -*- coding: utf-8 -*-
"""
Class and program to colorize python source code for ANSI terminals.
Based on an HTML code highlighter by Jurgen Hermann found at:
http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/52298
Modifications by Fernando Perez (fperez@colorado.edu).
Information on the original HTML highlighter follows:
MoinMoin - Python Source Parser
Title: Colorize Python source using the built-in tokenizer
Submitter: Jurgen Hermann
Last Updated:2001/04/06
Version no:1.2
Description:
This code is part of MoinMoin (http://moin.sourceforge.net/) and converts
Python source code to HTML markup, rendering comments, keywords,
operators, numeric and string literals in different colors.
It shows how to use the built-in keyword, token and tokenize modules to
scan Python source code and re-emit it with no changes to its original
formatting (which is the hard part).
"""
from __future__ import print_function
from __future__ import unicode_literals
__all__ = ['ANSICodeColors','Parser']
_scheme_default = 'Linux'
# Imports
import StringIO
import keyword
import os
import optparse
import sys
import token
import tokenize
try:
generate_tokens = tokenize.generate_tokens
except AttributeError:
# Python 3. Note that we use the undocumented _tokenize because it expects
# strings, not bytes. See also Python issue #9969.
generate_tokens = tokenize._tokenize
from IPython.utils.coloransi import *
#############################################################################
### Python Source Parser (does Hilighting)
#############################################################################
_KEYWORD = token.NT_OFFSET + 1
_TEXT = token.NT_OFFSET + 2
#****************************************************************************
# Builtin color schemes
Colors = TermColors # just a shorthand
# Build a few color schemes
NoColor = ColorScheme(
'NoColor',{
token.NUMBER : Colors.NoColor,
token.OP : Colors.NoColor,
token.STRING : Colors.NoColor,
tokenize.COMMENT : Colors.NoColor,
token.NAME : Colors.NoColor,
token.ERRORTOKEN : Colors.NoColor,
_KEYWORD : Colors.NoColor,
_TEXT : Colors.NoColor,
'normal' : Colors.NoColor # color off (usu. Colors.Normal)
} )
LinuxColors = ColorScheme(
'Linux',{
token.NUMBER : Colors.LightCyan,
token.OP : Colors.Yellow,
token.STRING : Colors.LightBlue,
tokenize.COMMENT : Colors.LightRed,
token.NAME : Colors.Normal,
token.ERRORTOKEN : Colors.Red,
_KEYWORD : Colors.LightGreen,
_TEXT : Colors.Yellow,
'normal' : Colors.Normal # color off (usu. Colors.Normal)
} )
LightBGColors = ColorScheme(
'LightBG',{
token.NUMBER : Colors.Cyan,
token.OP : Colors.Blue,
token.STRING : Colors.Blue,
tokenize.COMMENT : Colors.Red,
token.NAME : Colors.Normal,
token.ERRORTOKEN : Colors.Red,
_KEYWORD : Colors.Green,
_TEXT : Colors.Blue,
'normal' : Colors.Normal # color off (usu. Colors.Normal)
} )
# Build table of color schemes (needed by the parser)
ANSICodeColors = ColorSchemeTable([NoColor,LinuxColors,LightBGColors],
_scheme_default)
class Parser:
""" Format colored Python source.
"""
def __init__(self, color_table=None,out = sys.stdout):
""" Create a parser with a specified color table and output channel.
Call format() to process code.
"""
self.color_table = color_table and color_table or ANSICodeColors
self.out = out
def format(self, raw, out = None, scheme = ''):
return self.format2(raw, out, scheme)[0]
def format2(self, raw, out = None, scheme = ''):
""" Parse and send the colored source.
If out and scheme are not specified, the defaults (given to
constructor) are used.
out should be a file-type object. Optionally, out can be given as the
string 'str' and the parser will automatically return the output in a
string."""
string_output = 0
if out == 'str' or self.out == 'str' or \
isinstance(self.out,StringIO.StringIO):
# XXX - I don't really like this state handling logic, but at this
# point I don't want to make major changes, so adding the
# isinstance() check is the simplest I can do to ensure correct
# behavior.
out_old = self.out
self.out = StringIO.StringIO()
string_output = 1
elif out is not None:
self.out = out
# Fast return of the unmodified input for NoColor scheme
if scheme == 'NoColor':
error = False
self.out.write(raw)
if string_output:
return raw,error
else:
return None,error
# local shorthands
colors = self.color_table[scheme].colors
self.colors = colors # put in object so __call__ sees it
# Remove trailing whitespace and normalize tabs
self.raw = raw.expandtabs().rstrip()
# store line offsets in self.lines
self.lines = [0, 0]
pos = 0
raw_find = self.raw.find
lines_append = self.lines.append
while 1:
pos = raw_find('\n', pos) + 1
if not pos: break
lines_append(pos)
lines_append(len(self.raw))
# parse the source and write it
self.pos = 0
text = StringIO.StringIO(self.raw)
error = False
try:
for atoken in generate_tokens(text.readline):
self(*atoken)
except tokenize.TokenError as ex:
msg = ex.args[0]
line = ex.args[1][0]
self.out.write("%s\n\n*** ERROR: %s%s%s\n" %
(colors[token.ERRORTOKEN],
msg, self.raw[self.lines[line]:],
colors.normal)
)
error = True
self.out.write(colors.normal+'\n')
if string_output:
output = self.out.getvalue()
self.out = out_old
return (output, error)
return (None, error)
def __call__(self, toktype, toktext, start_pos, end_pos, line):
""" Token handler, with syntax highlighting."""
(srow,scol) = start_pos
(erow,ecol) = end_pos
colors = self.colors
owrite = self.out.write
# line separator, so this works across platforms
linesep = os.linesep
# calculate new positions
oldpos = self.pos
newpos = self.lines[srow] + scol
self.pos = newpos + len(toktext)
# send the original whitespace, if needed
if newpos > oldpos:
owrite(self.raw[oldpos:newpos])
# skip indenting tokens
if toktype in [token.INDENT, token.DEDENT]:
self.pos = newpos
return
# map token type to a color group
if token.LPAR <= toktype and toktype <= token.OP:
toktype = token.OP
elif toktype == token.NAME and keyword.iskeyword(toktext):
toktype = _KEYWORD
color = colors.get(toktype, colors[_TEXT])
#print '<%s>' % toktext, # dbg
# Triple quoted strings must be handled carefully so that backtracking
# in pagers works correctly. We need color terminators on _each_ line.
if linesep in toktext:
toktext = toktext.replace(linesep, '%s%s%s' %
(colors.normal,linesep,color))
# send text
owrite('%s%s%s' % (color,toktext,colors.normal))
def main(argv=None):
"""Run as a command-line script: colorize a python file or stdin using ANSI
color escapes and print to stdout.
Inputs:
- argv(None): a list of strings like sys.argv[1:] giving the command-line
arguments. If None, use sys.argv[1:].
"""
usage_msg = """%prog [options] [filename]
Colorize a python file or stdin using ANSI color escapes and print to stdout.
If no filename is given, or if filename is -, read standard input."""
parser = optparse.OptionParser(usage=usage_msg)
newopt = parser.add_option
newopt('-s','--scheme',metavar='NAME',dest='scheme_name',action='store',
choices=['Linux','LightBG','NoColor'],default=_scheme_default,
help="give the color scheme to use. Currently only 'Linux'\
(default) and 'LightBG' and 'NoColor' are implemented (give without\
quotes)")
opts,args = parser.parse_args(argv)
if len(args) > 1:
parser.error("you must give at most one filename.")
if len(args) == 0:
fname = '-' # no filename given; setup to read from stdin
else:
fname = args[0]
if fname == '-':
stream = sys.stdin
else:
try:
stream = open(fname)
except IOError as msg:
print(msg, file=sys.stderr)
sys.exit(1)
parser = Parser()
# we need nested try blocks because pre-2.5 python doesn't support unified
# try-except-finally
try:
try:
# write colorized version to stdout
parser.format(stream.read(),scheme=opts.scheme_name)
except IOError as msg:
# if user reads through a pager and quits, don't print traceback
if msg.args != (32,'Broken pipe'):
raise
finally:
if stream is not sys.stdin:
stream.close() # in case a non-handled exception happened above
if __name__ == "__main__":
main()
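
# ---------------------------------------------------------------------------
# Editor's sketch (not part of the original module, never called): the
# out='str' mode described in Parser.format2. The sample source text and the
# 'Linux' scheme name are illustrative assumptions, and the StringIO/tokenize
# usage above targets Python 2.
# ---------------------------------------------------------------------------
def _demo_format2():
    parser = Parser()
    colored, had_error = parser.format2("for i in range(3):\n    print(i)\n",
                                        out='str', scheme='Linux')
    if not had_error:
        sys.stdout.write(colored)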
| [
[
[
915,
929
]
],
[
[
954,
970
]
],
[
[
972,
979
]
],
[
[
1011,
1026
],
[
3319,
3334
],
[
8496,
8511
]
],
[
[
1056,
1064
],
[
4269,
4277
],
[
4592,
4600
],
[
5608,
5616
]
],
[
[
1072,
1079
],
[
7245,
7252
]
],
[
[
1087,
1089
],
[
6663,
6665
]
],
[
[
1097,
1105
],
[
8292,
8300
]
],
[
[
1113,
1116
],
[
3444,
3447
],
[
8952,
8955
],
[
9077,
9080
],
[
9101,
9104
],
[
9591,
9594
]
],
[
[
1124,
1129
],
[
1648,
1653
],
[
1679,
1684
],
[
1915,
1920
],
[
1954,
1959
],
[
1993,
1998
],
[
2071,
2076
],
[
2110,
2115
],
[
2352,
2357
],
[
2393,
2398
],
[
2431,
2436
],
[
2512,
2517
],
[
2550,
2555
],
[
2792,
2797
],
[
2828,
2833
],
[
2864,
2869
],
[
2935,
2940
],
[
2973,
2978
],
[
5956,
5961
],
[
6996,
7001
],
[
7010,
7015
],
[
7128,
7133
],
[
7165,
7170
],
[
7197,
7202
],
[
7230,
7235
]
],
[
[
1137,
1145
],
[
1174,
1182
],
[
1378,
1386
],
[
2032,
2040
],
[
2472,
2480
],
[
2900,
2908
],
[
5775,
5783
]
],
[
[
1156,
1171
],
[
5698,
5713
]
],
[
[
1360,
1375
],
[
5698,
5713
]
],
[
[
1434,
1435
],
[
1812,
1822
],
[
1882,
1893
],
[
2321,
2332
],
[
2759,
2770
],
[
3231,
3247
]
],
[
[
1637,
1645
],
[
2150,
2158
],
[
2586,
2594
],
[
3009,
3017
],
[
7295,
7303
]
],
[
[
1668,
1673
],
[
2189,
2194
],
[
2628,
2633
],
[
3046,
3051
],
[
7347,
7352
]
],
[
[
1803,
1809
],
[
1934,
1940
],
[
1973,
1979
],
[
2012,
2018
],
[
2051,
2057
],
[
2090,
2096
],
[
2129,
2135
],
[
2169,
2175
],
[
2208,
2214
],
[
2248,
2254
],
[
2371,
2377
],
[
2412,
2418
],
[
2450,
2456
],
[
2491,
2497
],
[
2531,
2537
],
[
2569,
2575
],
[
2605,
2611
],
[
2647,
2653
],
[
2686,
2692
],
[
2811,
2817
],
[
2847,
2853
],
[
2883,
2889
],
[
2919,
2925
],
[
2954,
2960
],
[
2992,
2998
],
[
3028,
3034
],
[
3065,
3071
],
[
3102,
3108
]
],
[
[
1872,
1879
],
[
3249,
3256
]
],
[
[
2307,
2318
],
[
3257,
3268
]
],
[
[
2743,
2756
],
[
3269,
3282
]
],
[
[
3214,
3228
],
[
3644,
3658
]
],
[
[
3343,
3349
],
[
9127,
9133
]
],
[
[
7798,
7802
],
[
9710,
9714
]
]
] |
# Last Updated: 2.2
from datetime import datetime
from util.diagMessage import DiagMessage
# Module-level verbosity flag; Logger._write() below checks this name, so a
# default is defined here to avoid a NameError when nothing else sets it.
debug = False
# Logger class
# Buffers and writes messages to a file
class Logger:
BUFFER_MAX = 10
DEFAULT_FN = "../log.txt"
# Constructor for logger class
# Params: fn - file name to use or leave default
# log - flag to keep a log file or not
# Return: Logger instance
def __init__(self, fn = DEFAULT_FN, log = True):
#{{{
self.keep_log = log
self.fn = fn
self.log_buffer = []
if self.keep_log:
self.log(DiagMessage("LOG0000I"))
#}}}
# Append line to internal log buffer, flush if needed
# Params: diag - DiagMessage to log
# flush - bool flag for flushing buffer early
# Return: None
def log(self, diag, flush=False):
#{{{
if self.keep_log:
self.log_buffer.append(str(datetime.now()) + " - " + diag.msg)
if len(self.log_buffer) >= self.BUFFER_MAX or flush:
self._write()
elif not flush:
print(diag.msg)
#}}}
# Write contents of buffer out to file
# Params: None
# Return: None
def _write(self):
#{{{
print("Writing log...") if debug else None
with open(self.fn,'a') as logfile:
for line in self.log_buffer:
try:
logfile.write(line)
except TypeError:
logfile.write(str(datetime.now())+" - LOG ERR")
except UnicodeEncodeError:
logfile.write(str(line.encode("utf-8","replace")))
logfile.write("\n")
del self.log_buffer[:]
#}}}
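
# Editor's sketch (not part of the original module, never called): a
# console-only Logger (log=False) that just echoes the DiagMessage text;
# "LOG0000I" is the startup code already used in __init__ above.
def _demo_logger():
    lg = Logger(log=False)
    lg.log(DiagMessage("LOG0000I"))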
| [
[
[
41,
49
],
[
892,
900
],
[
1465,
1473
]
],
[
[
79,
90
],
[
569,
580
]
],
[
[
153,
159
]
]
] |
# Standard Library
from copy import deepcopy
# 3rd Party
# Internal
# ########################################################################### #
class MetaData (dict):
"""
A class for holding information about an object
"""
def __init__ (self,*args,**kwargs):
super(MetaData,self).__init__(*args,**kwargs)
def __repr__ (self):
reprout = 'MetaData {'
if len(self) == 0:
return reprout + "}"
reprout += "\n"
for key in self:
value = str(repr(self[key])).split("\n")
reprout += " "+str(key)+" : "
reprout += value[0].strip()+"\n"
if len(value) > 1:
reprout += " "*(len(key))+" ...\n"
reprout += "}\n"
return reprout
def __str__ (self):
return super(MetaData,self).__repr__()
def _type_check_other (self,other):
if not isinstance(other,dict):
raise TypeError("other must be a subclass of dict")
def __add__ (self,other):
return self.combine(other,key_conflicts='raise')
def __iadd__ (self,other):
self._type_check_other(other)
for key in other:
if key in self:
continue
self[key] = other[key]
return self
def combine (self,other,key_conflicts='ignore',return_=False):
"""
Combine two MetaData dictionaries together.
Parameters
----------
other : dict subclass
Any dictionary object will work including other MetaData Dictionaries
key_conflicts : 'ignore' (default), 'merge', 'warn', 'raise'
Defined the method to handle key conflicts
* ignore : if key is in conflict, keep the current key with no warning
* merge : convert key to string and add integers until unique key is found
* warn : print a warning message for key conflicts. Keep current key
* raise : raise error message for key conflicts.
return_ : boolean
If True then it will keep the data in place and return a copy with
with the concatenation
Returns
-------
info : MetaData
Returns an information object with keys and information
concatenated from the two
Raises
------
KeyError : If key_conflicts=='raise' is True and conflicts exist between two keys
Notes
-----
__1)__ If a key is in conflict but the data the key refers to is the same then
no messages or errors will be raised
Special cases
-------------
add operator : info1 + info2
This will raise errors for key conflicts between the two
iadd operator : info1 += info2
This will ignore key conflicts
and always takes info1 keys as default
"""
self._type_check_other(other)
def errmsg (key):
return "Warning: key conflict '"+str(key)+"'"
key_conflicts = key_conflicts.lower()
if return_:
out = self.copy()
else:
out = self
if key_conflicts=='merge':
for key in other:
if key in self and self[key]==other[key]:
continue
i = 0
base_key = deepcopy(key)
while key in self:
key = str(base_key)+"_"+str(i)
i += 1
out[key] = other[base_key]
return out
# else:
for key in other:
if key in self:
# if the data's the same don't worry about it
if self[key]==other[key]:
continue
# resolve conflicts
if key_conflicts=='raise':
raise KeyError(errmsg(key))
elif key_conflicts=='warn':
print(errmsg(key))
else:
continue
out[key] = other[key]
if return_:
return out
def copy (self):
return deepcopy(self)
def header_list(self):
"""returns a list of the values belonging to keys beginning header_ """
keys = list(self.keys())
headers = []
for key in keys:
try:
keystart = key[:7]
if keystart == "header_":
headers.append(self[key])
except:
pass
return headers
def guess_observation_time(self, headers=None):
        if headers is None:
headers = self.header_list()
obs_time = None
for hdr in headers:
try:
obs_time = hdr["ut"]
break
except:
pass
return obs_time
def guess_airmass(self, headers):
        if headers is None:
headers = self.header_list()
airmass = None
for hdr in headers:
try:
airmass = hdr["airmass"]
break
except:
pass
return airmass
def guess_object_name(self):
return None
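
# Editor's sketch (not part of the original module, never called): the
# 'merge' conflict policy documented in MetaData.combine renames clashing
# keys with an integer suffix. The FITS-style keys are illustrative only.
def _demo_metadata_combine():
    a = MetaData({"exptime": 30, "object": "M31"})
    b = MetaData({"exptime": 60, "filter": "V"})
    merged = a.combine(b, key_conflicts="merge", return_=True)
    # merged now holds 'exptime' (30), 'exptime_0' (60), 'object' and 'filter'
    return merged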
| [
[
[
37,
45
],
[
3518,
3526
],
[
4315,
4323
]
],
[
[
159,
167
],
[
303,
311
],
[
837,
845
]
]
] |
Inc('dfaccto/util.py', abs=True)
class _Event(ModuleContext):
def __init__(self):
ModuleContext.__init__(self)
self._setup_packages()
def _setup_packages(self):
self.pkg = Pkg('dfaccto_event',
x_templates={self.File('generic/package.vhd.tpl'): self.File('pkg/dfaccto_event.vhd')})
with self.pkg:
self.tEvent = self.TypeEvent('Event')
def TypeEvent(self, name, stb_bits=None, ack_bits=None):
tlogic = Util.tlogic
if stb_bits is not None:
tsdata = Util.TypeUnsigned('{}Strb'.format(name), width=stb_bits)
else:
tsdata = None
if ack_bits is not None:
tadata = Util.TypeUnsigned('{}Ack'.format(name), width=ack_bits)
else:
tadata = None
return TypeC(name, x_is_event=True,
x_definition=self.Part('types/definition/event.part.tpl'),
x_format_ms=self.Part('types/format/event_ms.part.tpl'),
x_format_sm=self.Part('types/format/event_sm.part.tpl'),
x_wrapeport=self.Part('types/wrapeport/event.part.tpl'),
x_wrapeconv=self.Part('types/wrapeconv/event.part.tpl'),
x_wrapipmap=self.Part('types/wrapipmap/event.part.tpl'),
x_wrapigmap=None,
x_tlogic=tlogic, x_tsdata=tsdata, x_tadata=tadata,
x_cnull=lambda t: Con('{}Null'.format(name), t, value=Lit({'stb': False, 'ack': False})))
Event = _Event()
| [
[
[
41,
47
],
[
1364,
1370
]
],
[
[
1356,
1361
]
]
] |
# -*- coding: utf-8 -*-
# Max-Planck-Gesellschaft zur Förderung der Wissenschaften e.V. (MPG) is
# holder of all proprietary rights on this computer program.
# You can only use this computer program if you have closed
# a license agreement with MPG or you get the right to use the computer
# program from someone who is authorized to grant you that right.
# Any use of the computer program without a valid license is prohibited and
# liable to prosecution.
#
# Copyright©2019 Max-Planck-Gesellschaft zur Förderung
# der Wissenschaften e.V. (MPG). acting on behalf of its Max Planck Institute
# for Intelligent Systems. All rights reserved.
#
# Contact: ps-license@tuebingen.mpg.de
from __future__ import absolute_import
from __future__ import print_function
from __future__ import division
import numpy as np
import torch
import torch.nn.functional as F
from .utils import rot_mat_to_euler
def find_dynamic_lmk_idx_and_bcoords(vertices, pose, dynamic_lmk_faces_idx,
dynamic_lmk_b_coords,
neck_kin_chain, dtype=torch.float32):
''' Compute the faces, barycentric coordinates for the dynamic landmarks
To do so, we first compute the rotation of the neck around the y-axis
and then use a pre-computed look-up table to find the faces and the
barycentric coordinates that will be used.
Special thanks to Soubhik Sanyal (soubhik.sanyal@tuebingen.mpg.de)
for providing the original TensorFlow implementation and for the LUT.
Parameters
----------
vertices: torch.tensor BxVx3, dtype = torch.float32
The tensor of input vertices
pose: torch.tensor Bx(Jx3), dtype = torch.float32
The current pose of the body model
dynamic_lmk_faces_idx: torch.tensor L, dtype = torch.long
The look-up table from neck rotation to faces
dynamic_lmk_b_coords: torch.tensor Lx3, dtype = torch.float32
The look-up table from neck rotation to barycentric coordinates
neck_kin_chain: list
A python list that contains the indices of the joints that form the
kinematic chain of the neck.
dtype: torch.dtype, optional
Returns
-------
dyn_lmk_faces_idx: torch.tensor, dtype = torch.long
A tensor of size BxL that contains the indices of the faces that
will be used to compute the current dynamic landmarks.
dyn_lmk_b_coords: torch.tensor, dtype = torch.float32
A tensor of size BxL that contains the indices of the faces that
will be used to compute the current dynamic landmarks.
'''
batch_size = vertices.shape[0]
aa_pose = torch.index_select(pose.view(batch_size, -1, 3), 1,
neck_kin_chain)
rot_mats = batch_rodrigues(
aa_pose.view(-1, 3), dtype=dtype).view(batch_size, -1, 3, 3)
rel_rot_mat = torch.eye(3, device=vertices.device,
dtype=dtype).unsqueeze_(dim=0)
for idx in range(len(neck_kin_chain)):
rel_rot_mat = torch.bmm(rot_mats[:, idx], rel_rot_mat)
y_rot_angle = torch.round(
torch.clamp(-rot_mat_to_euler(rel_rot_mat) * 180.0 / np.pi,
max=39)).to(dtype=torch.long)
neg_mask = y_rot_angle.lt(0).to(dtype=torch.long)
mask = y_rot_angle.lt(-39).to(dtype=torch.long)
neg_vals = mask * 78 + (1 - mask) * (39 - y_rot_angle)
y_rot_angle = (neg_mask * neg_vals +
(1 - neg_mask) * y_rot_angle)
dyn_lmk_faces_idx = torch.index_select(dynamic_lmk_faces_idx,
0, y_rot_angle)
dyn_lmk_b_coords = torch.index_select(dynamic_lmk_b_coords,
0, y_rot_angle)
return dyn_lmk_faces_idx, dyn_lmk_b_coords
def vertices2landmarks(vertices, faces, lmk_faces_idx, lmk_bary_coords):
''' Calculates landmarks by barycentric interpolation
Parameters
----------
vertices: torch.tensor BxVx3, dtype = torch.float32
The tensor of input vertices
faces: torch.tensor Fx3, dtype = torch.long
The faces of the mesh
lmk_faces_idx: torch.tensor L, dtype = torch.long
The tensor with the indices of the faces used to calculate the
landmarks.
lmk_bary_coords: torch.tensor Lx3, dtype = torch.float32
The tensor of barycentric coordinates that are used to interpolate
the landmarks
Returns
-------
landmarks: torch.tensor BxLx3, dtype = torch.float32
The coordinates of the landmarks for each mesh in the batch
'''
# Extract the indices of the vertices for each face
# BxLx3
batch_size, num_verts = vertices.shape[:2]
device = vertices.device
lmk_faces = torch.index_select(faces, 0, lmk_faces_idx.view(-1)).view(
batch_size, -1, 3)
lmk_faces += torch.arange(
batch_size, dtype=torch.long, device=device).view(-1, 1, 1) * num_verts
lmk_vertices = vertices.view(-1, 3)[lmk_faces].view(
batch_size, -1, 3, 3)
landmarks = torch.einsum('blfi,blf->bli', [lmk_vertices, lmk_bary_coords])
return landmarks
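
# Editor's sketch (not part of the original SMPL-X module, never called):
# vertices2landmarks on a toy one-mesh batch with 4 vertices, 2 faces and
# 2 landmarks; all sizes are arbitrary and follow the docstring shapes.
def _demo_vertices2landmarks():
    verts = torch.rand(1, 4, 3)                          # B x V x 3
    faces = torch.tensor([[0, 1, 2], [1, 2, 3]])         # F x 3, long
    lmk_faces_idx = torch.tensor([0, 1])                 # L
    lmk_bary_coords = torch.tensor([[[0.2, 0.3, 0.5],
                                     [1.0, 0.0, 0.0]]])  # B x L x 3
    return vertices2landmarks(verts, faces, lmk_faces_idx, lmk_bary_coords)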
def lbs(betas, pose, v_template, shapedirs, posedirs, J_regressor, parents,
lbs_weights, pose2rot=True, dtype=torch.float32):
''' Performs Linear Blend Skinning with the given shape and pose parameters
Parameters
----------
betas : torch.tensor BxNB
The tensor of shape parameters
pose : torch.tensor Bx(J + 1) * 3
The pose parameters in axis-angle format
    v_template : torch.tensor BxVx3
The template mesh that will be deformed
shapedirs : torch.tensor 1xNB
The tensor of PCA shape displacements
posedirs : torch.tensor Px(V * 3)
The pose PCA coefficients
J_regressor : torch.tensor JxV
The regressor array that is used to calculate the joints from
the position of the vertices
parents: torch.tensor J
The array that describes the kinematic tree for the model
lbs_weights: torch.tensor N x V x (J + 1)
The linear blend skinning weights that represent how much the
rotation matrix of each part affects each vertex
pose2rot: bool, optional
Flag on whether to convert the input pose tensor to rotation
matrices. The default value is True. If False, then the pose tensor
should already contain rotation matrices and have a size of
Bx(J + 1)x9
dtype: torch.dtype, optional
Returns
-------
verts: torch.tensor BxVx3
The vertices of the mesh after applying the shape and pose
displacements.
joints: torch.tensor BxJx3
The joints of the model
'''
batch_size = max(betas.shape[0], pose.shape[0])
device = betas.device
# Add shape contribution
v_shaped = v_template + blend_shapes(betas, shapedirs)
# v_shaped *= scale
# Get the joints
# NxJx3 array
J = vertices2joints(J_regressor, v_shaped)
# 3. Add pose blend shapes
# N x J x 3 x 3
ident = torch.eye(3, dtype=dtype, device=device)
if pose2rot:
rot_mats = batch_rodrigues(
pose.view(-1, 3), dtype=dtype).view([batch_size, -1, 3, 3])
pose_feature = (rot_mats[:, 1:, :, :] - ident).view([batch_size, -1])
# (N x P) x (P, V * 3) -> N x V x 3
pose_offsets = torch.matmul(pose_feature, posedirs) \
.view(batch_size, -1, 3)
else:
pose_feature = pose[:, 1:].view(batch_size, -1, 3, 3) - ident
rot_mats = pose.view(batch_size, -1, 3, 3)
pose_offsets = torch.matmul(pose_feature.view(batch_size, -1),
posedirs).view(batch_size, -1, 3)
v_posed = pose_offsets + v_shaped
# 4. Get the global joint location
J_transformed, A = batch_rigid_transform(rot_mats, J, parents, dtype=dtype)
# 5. Do skinning:
# W is N x V x (J + 1)
W = lbs_weights.unsqueeze(dim=0).expand([batch_size, -1, -1])
# (N x V x (J + 1)) x (N x (J + 1) x 16)
num_joints = J_regressor.shape[0]
T = torch.matmul(W, A.view(batch_size, num_joints, 16)) \
.view(batch_size, -1, 4, 4)
homogen_coord = torch.ones([batch_size, v_posed.shape[1], 1],
dtype=dtype, device=device)
v_posed_homo = torch.cat([v_posed, homogen_coord], dim=2)
v_homo = torch.matmul(T, torch.unsqueeze(v_posed_homo, dim=-1))
verts = v_homo[:, :, :3, 0]
return verts, J_transformed
def vertices2joints(J_regressor, vertices):
''' Calculates the 3D joint locations from the vertices
Parameters
----------
J_regressor : torch.tensor JxV
The regressor array that is used to calculate the joints from the
position of the vertices
vertices : torch.tensor BxVx3
The tensor of mesh vertices
Returns
-------
torch.tensor BxJx3
The location of the joints
'''
return torch.einsum('bik,ji->bjk', [vertices, J_regressor])
def blend_shapes(betas, shape_disps):
''' Calculates the per vertex displacement due to the blend shapes
Parameters
----------
betas : torch.tensor Bx(num_betas)
Blend shape coefficients
shape_disps: torch.tensor Vx3x(num_betas)
Blend shapes
Returns
-------
torch.tensor BxVx3
The per-vertex displacement due to shape deformation
'''
# Displacement[b, m, k] = sum_{l} betas[b, l] * shape_disps[m, k, l]
# i.e. Multiply each shape displacement by its corresponding beta and
# then sum them.
blend_shape = torch.einsum('bl,mkl->bmk', [betas, shape_disps])
return blend_shape
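
# Editor's sketch (not part of the original SMPL-X module, never called):
# exercises the einsum above with 2 meshes, 5 vertices and 10 betas, and
# spot-checks one entry against the written-out sum from the comment.
def _demo_blend_shapes():
    betas = torch.rand(2, 10)
    shape_disps = torch.rand(5, 3, 10)
    offsets = blend_shapes(betas, shape_disps)           # -> (2, 5, 3)
    manual = (betas[0] * shape_disps[2, 1]).sum()
    assert torch.allclose(offsets[0, 2, 1], manual)
    return offsets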
def batch_rodrigues(rot_vecs, epsilon=1e-8, dtype=torch.float32):
''' Calculates the rotation matrices for a batch of rotation vectors
Parameters
----------
rot_vecs: torch.tensor Nx3
array of N axis-angle vectors
Returns
-------
R: torch.tensor Nx3x3
The rotation matrices for the given axis-angle parameters
'''
batch_size = rot_vecs.shape[0]
device = rot_vecs.device
angle = torch.norm(rot_vecs + 1e-8, dim=1, keepdim=True)
rot_dir = rot_vecs / angle
cos = torch.unsqueeze(torch.cos(angle), dim=1)
sin = torch.unsqueeze(torch.sin(angle), dim=1)
# Bx1 arrays
rx, ry, rz = torch.split(rot_dir, 1, dim=1)
K = torch.zeros((batch_size, 3, 3), dtype=dtype, device=device)
zeros = torch.zeros((batch_size, 1), dtype=dtype, device=device)
K = torch.cat([zeros, -rz, ry, rz, zeros, -rx, -ry, rx, zeros], dim=1) \
.view((batch_size, 3, 3))
ident = torch.eye(3, dtype=dtype, device=device).unsqueeze(dim=0)
rot_mat = ident + sin * K + (1 - cos) * torch.bmm(K, K)
return rot_mat
def transform_mat(R, t):
''' Creates a batch of transformation matrices
Args:
- R: Bx3x3 array of a batch of rotation matrices
- t: Bx3x1 array of a batch of translation vectors
Returns:
- T: Bx4x4 Transformation matrix
'''
# No padding left or right, only add an extra row
return torch.cat([F.pad(R, [0, 0, 0, 1]),
F.pad(t, [0, 0, 0, 1], value=1)], dim=2)
def batch_rigid_transform(rot_mats, joints, parents, dtype=torch.float32):
"""
Applies a batch of rigid transformations to the joints
Parameters
----------
rot_mats : torch.tensor BxNx3x3
Tensor of rotation matrices
joints : torch.tensor BxNx3
Locations of joints
parents : torch.tensor BxN
The kinematic tree of each object
dtype : torch.dtype, optional:
The data type of the created tensors, the default is torch.float32
Returns
-------
posed_joints : torch.tensor BxNx3
The locations of the joints after applying the pose rotations
rel_transforms : torch.tensor BxNx4x4
The relative (with respect to the root joint) rigid transformations
for all the joints
"""
joints = torch.unsqueeze(joints, dim=-1)
rel_joints = joints.clone()
rel_joints[:, 1:] -= joints[:, parents[1:]]
transforms_mat = transform_mat(
rot_mats.reshape(-1, 3, 3),
rel_joints.reshape(-1, 3, 1)).reshape(-1, joints.shape[1], 4, 4)
# transforms_mat[:, 0][:,:3,:3] *= scale
transform_chain = [transforms_mat[:, 0]]
for i in range(1, parents.shape[0]):
# Subtract the joint location at the rest pose
# No need for rotation, since it's identity when at rest
curr_res = torch.matmul(transform_chain[parents[i]],
transforms_mat[:, i])
transform_chain.append(curr_res)
transforms = torch.stack(transform_chain, dim=1)
# The last column of the transformations contains the posed joints
posed_joints = transforms[:, :, :3, 3]
joints_homogen = F.pad(joints, [0, 0, 0, 1])
rel_transforms = transforms - F.pad(
torch.matmul(transforms, joints_homogen), [3, 0, 0, 0, 0, 0, 0, 0])
return posed_joints, rel_transforms
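
# Editor's sketch (not part of the original SMPL-X module, never called): a
# 3-joint chain (root, child, grandchild) posed with random axis-angle
# rotations via batch_rodrigues and batch_rigid_transform; the kinematic
# tree and tensor sizes are illustrative only.
def _demo_rigid_transform():
    rot_mats = batch_rodrigues(torch.rand(3, 3)).view(1, 3, 3, 3)
    joints = torch.rand(1, 3, 3)
    parents = torch.tensor([-1, 0, 1])
    posed_joints, rel_transforms = batch_rigid_transform(rot_mats, joints, parents)
    return posed_joints.shape, rel_transforms.shape      # (1, 3, 3), (1, 3, 4, 4)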
| [
[
[
706,
721
]
],
[
[
745,
759
]
],
[
[
783,
791
]
],
[
[
800,
811
],
[
3265,
3267
]
],
[
[
820,
825
],
[
1091,
1096
],
[
5400,
5405
],
[
9965,
9970
],
[
11545,
11550
],
[
2749,
2754
],
[
2970,
2975
],
[
3131,
3136
],
[
3191,
3196
],
[
3212,
3217
],
[
3310,
3315
],
[
3364,
3369
],
[
3416,
3421
],
[
3602,
3607
],
[
3726,
3731
],
[
4893,
4898
],
[
4997,
5002
],
[
5037,
5042
],
[
5196,
5201
],
[
7311,
7316
],
[
7623,
7628
],
[
7854,
7859
],
[
8337,
8342
],
[
8448,
8453
],
[
8572,
8577
],
[
8628,
8633
],
[
8644,
8649
],
[
9201,
9206
],
[
9840,
9845
],
[
10387,
10392
],
[
10478,
10483
],
[
10494,
10499
],
[
10529,
10534
],
[
10545,
10550
],
[
10605,
10610
],
[
10644,
10649
],
[
10717,
10722
],
[
10782,
10787
],
[
10898,
10903
],
[
11000,
11005
],
[
11386,
11391
],
[
12273,
12278
],
[
12803,
12808
],
[
12958,
12963
],
[
13324,
13329
]
],
[
[
833,
857
],
[
11397,
11398
],
[
11443,
11444
],
[
13246,
13247
],
[
13309,
13310
]
],
[
[
878,
894
],
[
3225,
3241
]
],
[
[
901,
933
]
],
[
[
3879,
3897
]
],
[
[
5286,
5289
]
],
[
[
8755,
8770
],
[
7208,
7223
]
],
[
[
9260,
9272
],
[
7106,
7118
]
],
[
[
9919,
9934
],
[
2865,
2880
],
[
7388,
7403
]
],
[
[
11041,
11054
],
[
12408,
12421
]
],
[
[
11490,
11511
],
[
8073,
8094
]
]
] |
"""
foxBMS Software License
Copyright 2010-2016, Fraunhofer-Gesellschaft zur Foerderung
der angewandten Forschung e.V.
All rights reserved.
BSD 3-Clause License
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
3. Neither the name of the copyright holder nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.
We kindly request you to use one or more of the following phrases to refer
to foxBMS in your hardware, software, documentation or advertising
materials:
"This product uses parts of foxBMS"
"This product includes parts of foxBMS"
"This product is derived from foxBMS"
If you use foxBMS in your products, we encourage you to contact us at:
CONTACT INFORMATION
Fraunhofer IISB ; Schottkystrasse 10 ; 91058 Erlangen, Germany
Dr.-Ing. Vincent LORENTZ
+49 9131-761-346
info@foxbms.org
www.foxbms.org
:author: Martin Giegerich <martin.giegerich@iisb.fraunhofer.de>
"""
import stm32interface
import argparse
import sys
import logging
"""
flash tool implementation for the STM32F4 microcontroller
- for detailed insight into the USART protocol refer to STM32 appnote AN3155
- for detailed insight into the device bootloader refer to STM32 appnote AN2606
"""
class STM32Flasher(stm32interface.STM32Interface):
def __init__(self, port = None, file = None, baudrate=115200,
address = 0x08000000, goaddress = -1, bytes = 256,**kwargs):
stm32interface.STM32Interface.__init__(self, port, baudrate)
self._file = file
self.bytes = bytes
self.address = address
self._doBeforeInit()
self.init()
def _doBeforeInit(self):
''' abstract method to optionally reset microcontroller or toggle boot pins '''
pass
def __enter__(self):
return self
def read(self):
data = []
length = self.bytes
address = self.address
logging.debug("Flash Read Start, Length: {0}, Address: {1:#x} ".format(length, address))
while length > 256:
logging.debug("Read {0} bytes at {1:#x}".format(256, address))
data = data + self.readMemory(address, 256)
address += 256
length -= 256
logging.info("[{0}/{1}] read ".format(self.bytes-length, self.bytes))
logging.debug("Read {0} bytes at {1:#x}".format(length, address))
data = data + self.readMemory(address, length)
logging.info("[{0}/{1}] read".format(self.bytes, self.bytes))
return data
def write(self, data):
logging.debug("Flash Write Start")
length = len(data)
alllng = len(data)
address = self.address
offset = 0
while length > 256:
logging.debug("Write {0} bytes at {1:#x}".format(256, address))
self.writeMemory(address, data[offset:offset+256])
offset += 256
address += 256
length -= 256
logging.info("[{0}/{1}] written".format(alllng-length, alllng))
logging.debug("Write {0} bytes at {1:#x}".format(length, address))
self.writeMemory(address, data[offset:offset+length] )
logging.info("[{0}/{1}] written".format(alllng, alllng))
#logging.info("Flash Write End")
def erase(self):
logging.info("Flash Erase Start")
super(STM32Flasher, self).erase()
logging.info("Flash Erase End")
def verify(self, data):
logging.info("Flash verify")
self.bytes = len(data)
verify = self.read()
if data == verify:
logging.info("Verify successful")
return True
else:
self.veriFail = str(len(data)) + ' vs ' + str(len(verify)) + '\n'
for i in xrange(0, len(data)):
if data[i] != verify[i]:
self.veriFail += hex(i) + ': ' + hex(data[i]) + ' vs ' + hex(verify[i]) + '\n'
logging.error(self.veriFail)
return False
def __str__(self):
id = self.getId()[1:3] # id without length byte and ack byte
version = self.getVersion()
return "ID: %s Bootloader version: %x" % (hex(reduce(lambda x, y: x*0x100+y, id)), version[0])
def auto_int(x):
return int(x, 0)
def main():
parser = argparse.ArgumentParser(description='STM32 flash tool',
formatter_class=argparse.RawDescriptionHelpFormatter,
epilog = '''\
Example:
%s --port COM3 --erase --write --verify build/src/general/foxbms_flash.bin
Copyright (c) 2015, 2016 Fraunhofer IISB.
All rights reserved.
This program has been released under the conditions of the 3-clause BSD
license.
''' % sys.argv[0])
parser.add_argument('-v', '--verbosity', action='count', default=0, help="increase output verbosity")
parser.add_argument('--erase', '-e', action='store_true', help='erase firmware')
parser.add_argument('--read', '-r', action='store_true', help='read and store firmware')
parser.add_argument('--write', '-w', action='store_true', help='writes firmware')
parser.add_argument('--verify', '-y', action='store_true', help='verify the firmware')
parser.add_argument('--bytes', '-s', type=int, default = 256, help='bytes to read from the firmware')
parser.add_argument('--bauds', '-b', type=int, default=115200, help='transfer speed (bauds)')
parser.add_argument('--port', '-p', type=str, default='/dev/tty.usbserial-ftCYPMYJ', help='ttyUSB port')
parser.add_argument('--address', '-a', type=auto_int, default=0x08000000, help='target address')
parser.add_argument('--goaddress', '-g', type=auto_int, default=-1, help='start address (use -1 for default)')
parser.add_argument('firmware', metavar = 'FIRMWARE FILE', help='firmware binary')
args = parser.parse_args()
if args.verbosity == 1:
logging.basicConfig(level = logging.INFO)
elif args.verbosity > 1:
logging.basicConfig(level = logging.DEBUG)
else:
logging.basicConfig(level = logging.ERROR)
if args.read:
if args.erase:
parser.error('Cannot use --erase together with --read')
if args.write:
parser.error('Cannot use --write together with --read')
if args.bytes == None:
parser.error('Please give a length (in bytes) to read')
with STM32Flasher(**vars(args)) as flasher:
if args.write or args.verify:
with open(args.firmware, 'rb') as f:
data = map(lambda c: ord(c), f.read())
if args.erase:
flasher.erase()
if args.write:
flasher.write(data)
if args.verify:
flasher.verify(data)
if args.read:
rdata = flasher.read()
with open(args.firmware, 'wb') as f:
f.write(''.join(map(chr,rdata)))
if args.goaddress > -1:
flasher.go(args.goaddress)
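
# Editor's sketch (not part of the original tool, never called): programmatic
# use mirroring main(). The port name and firmware path are placeholders,
# real STM32 hardware in bootloader mode is required, and the context-manager
# exit is assumed to come from the stm32interface base class.
def _demo_flash(port="/dev/ttyUSB0", firmware="firmware.bin"):
    with open(firmware, 'rb') as f:
        data = map(lambda c: ord(c), f.read())
    with STM32Flasher(port=port) as flasher:
        flasher.erase()
        flasher.write(data)
        flasher.verify(data)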
if __name__ == "__main__":
main() | [
[
[
2229,
2243
],
[
2527,
2541
],
[
2720,
2734
]
],
[
[
2251,
2259
],
[
5623,
5631
],
[
5708,
5716
]
],
[
[
2267,
2270
],
[
6008,
6011
]
],
[
[
2278,
2285
],
[
3222,
3229
],
[
3360,
3367
],
[
3544,
3551
],
[
3622,
3629
],
[
3751,
3758
],
[
3873,
3880
],
[
4052,
4059
],
[
4270,
4277
],
[
4342,
4349
],
[
4480,
4487
],
[
4616,
4623
],
[
4700,
4707
],
[
4786,
4793
],
[
4914,
4921
],
[
5259,
5266
],
[
7178,
7185
],
[
7206,
7213
],
[
7257,
7264
],
[
7285,
7292
],
[
7318,
7325
],
[
7346,
7353
]
],
[
[
2514,
2526
],
[
4664,
4676
],
[
7675,
7687
]
],
[
[
5563,
5571
],
[
6850,
6858
],
[
6953,
6961
]
],
[
[
5602,
5606
],
[
8304,
8308
]
]
] |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import country_converter as coco
import pandas as pd
from covsirphy.util.term import Term
from covsirphy.loading.db_base import _RemoteDatabase
class _OWID(_RemoteDatabase):
"""
Access "Our World In Data".
https://github.com/owid/covid-19-data/tree/master/public/data
https://ourworldindata.org/coronavirus
Args:
filename (str): CSV filename to save records
"""
# URL for vaccine data
URL_V = "https://raw.githubusercontent.com/owid/covid-19-data/master/public/data/vaccinations/"
URL_V_REC = f"{URL_V}vaccinations.csv"
URL_V_LOC = f"{URL_V}locations.csv"
# URL for PCR data
URL_P = "https://raw.githubusercontent.com/owid/covid-19-data/master/public/data/testing/"
URL_P_REC = f"{URL_P}covid-testing-all-observations.csv"
# Citation
CITATION = "Hasell, J., Mathieu, E., Beltekian, D. et al." \
" A cross-country database of COVID-19 testing. Sci Data 7, 345 (2020)." \
" https://doi.org/10.1038/s41597-020-00688-8"
# Column names and data types
# {"name in database": "name defined in Term class"}
COL_DICT = {
"date": Term.DATE,
"location": Term.COUNTRY,
Term.PROVINCE: Term.PROVINCE,
"iso_code": Term.ISO3,
"vaccines": Term.PRODUCT,
"total_vaccinations": Term.VAC,
"people_vaccinated": Term.V_ONCE,
"people_fully_vaccinated": Term.V_FULL,
"tests": Term.TESTS,
}
def download(self, verbose):
"""
Download the dataset from the server and set the list of primary sources.
Args:
verbose (int): level of verbosity
Returns:
pandas.DataFrame
Index
reset index
Columns
defined by the first values of self.COL_DICT.values()
Note:
If @verbose is equal to or over 1, how to show the list will be explained.
"""
# Download datasets
if verbose:
print("Retrieving datasets from Our World In Data https://github.com/owid/covid-19-data/")
# Vaccinations
v_rec_cols = [
"date", "location", "iso_code", "total_vaccinations", "people_vaccinated", "people_fully_vaccinated"]
v_rec_df = pd.read_csv(self.URL_V_REC, usecols=v_rec_cols)
v_loc_df = pd.read_csv(self.URL_V_LOC, usecols=["location", "vaccines"])
v_df = v_rec_df.merge(v_loc_df, how="left", on="location")
# Tests
pcr_rec_cols = ["ISO code", "Date", "Daily change in cumulative total", "Cumulative total"]
pcr_df = pd.read_csv(self.URL_P_REC, usecols=pcr_rec_cols)
pcr_df = pcr_df.rename(columns={"ISO code": "iso_code", "Date": "date"})
pcr_df["cumsum"] = pcr_df.groupby("iso_code")["Daily change in cumulative total"].cumsum()
pcr_df = pcr_df.assign(tests=lambda x: x["Cumulative total"].fillna(x["cumsum"]))
# Combine data (vaccinations/tests)
df = v_df.set_index(["iso_code", "date"])
df = df.combine_first(pcr_df.set_index(["iso_code", "date"]).loc[:, ["tests"]])
df = df.reset_index()
# Location (country/province)
df["location"] = df["location"].replace(
{
# COG
"Congo": "Republic of the Congo",
}
)
df = df.loc[~df["iso_code"].str.contains("OWID_")]
df["location"] = df.groupby("iso_code")["location"].bfill()
df.loc[df["location"] == df["iso_code"], "location"] = None
df.loc[df["location"].isna(), "location"] = df.loc[df["location"].isna(), "iso_code"].apply(
lambda x: coco.convert(x, to="name_short", not_found=None))
df[self.PROVINCE] = self.UNKNOWN
return df
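
# Editor's sketch (not part of covsirphy, never called): assumes the
# _RemoteDatabase constructor takes the CSV filename documented in the class
# docstring, and that network access to the OWID servers is available.
def _demo_owid_download():
    loader = _OWID("owid_records.csv")
    return loader.download(verbose=1)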
| [
[
[
54,
79
],
[
3695,
3699
]
],
[
[
87,
99
],
[
2319,
2321
],
[
2386,
2388
],
[
2648,
2650
]
],
[
[
132,
136
],
[
1227,
1231
],
[
1174,
1178
],
[
1205,
1209
],
[
1242,
1246
],
[
1277,
1281
],
[
1308,
1312
],
[
1352,
1356
],
[
1391,
1395
],
[
1439,
1443
],
[
1469,
1473
]
],
[
[
175,
190
],
[
205,
220
]
],
[
[
199,
204
]
]
] |
# -----------------------------------------------------------------------------
#
# Copyright (C) 2021 CERN & Newcastle University for the benefit of the
# BioDynaMo collaboration. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
#
# See the LICENSE file distributed with this work for details.
# See the NOTICE file distributed with this work for additional information
# regarding copyright ownership.
#
# -----------------------------------------------------------------------------
class Print:
PURPLE = '\033[95m'
CYAN = '\033[96m'
DARKCYAN = '\033[36m'
BLUE = '\033[94m'
GREEN = '\033[92m'
YELLOW = '\033[93m'
RED = '\033[91m'
BOLD = '\033[1m'
UNDERLINE = '\033[4m'
END = '\033[0m'
@staticmethod
def success(message):
print(Print.BOLD + Print.GREEN + str(message) + Print.END)
@staticmethod
def error(message):
print(Print.RED + str(message) + Print.END)
@staticmethod
def warning(message):
print(Print.YELLOW + str(message) + Print.END)
@staticmethod
def new_step(message):
print('\n' + Print.BOLD + Print.BLUE + str(message) + Print.END)
| [
[
[
600,
605
],
[
895,
900
],
[
908,
913
],
[
937,
942
],
[
1005,
1010
],
[
1032,
1037
],
[
1102,
1107
],
[
1132,
1137
],
[
1210,
1215
],
[
1223,
1228
],
[
1251,
1256
]
]
] |
from __future__ import unicode_literals, division, absolute_import
from builtins import * # noqa pylint: disable=unused-import, redefined-builtin
from io import StringIO
import pytest
from jinja2 import Template
from flexget.entry import Entry
from flexget.logger import capture_output
from flexget.manager import get_parser, Session
from flexget.task import TaskAbort
from flexget.components.series import db
def age_series(**kwargs):
import datetime
session = Session()
session.query(db.EpisodeRelease).update({'first_seen': datetime.datetime.now() - datetime.timedelta(**kwargs)})
session.commit()
@pytest.fixture(scope='class', params=['internal', 'guessit'], ids=['internal', 'guessit'], autouse=True)
def config(request):
"""Override and parametrize default config fixture for all series tests."""
newconfig = Template(request.cls.config).render({'parser': request.param})
# Make sure we remembered to put the section in config
assert request.cls.config != newconfig, 'config parameterization did nothing?'
return newconfig
class TestQuality(object):
config = """
templates:
global:
parsing:
series: {{parser}}
tasks:
exact_quality:
mock:
- {title: 'QTest.S01E01.HDTV.XViD-FlexGet'}
- {title: 'QTest.S01E01.PDTV.XViD-FlexGet'}
- {title: 'QTest.S01E01.DSR.XViD-FlexGet'}
- {title: 'QTest.S01E01.1080p.XViD-FlexGet'}
- {title: 'QTest.S01E01.720p.XViD-FlexGet'}
series:
- QTest:
quality: 720p
quality_fail:
mock:
- {title: 'Q2Test.S01E01.HDTV.XViD-FlexGet'}
- {title: 'Q2Test.S01E01.PDTV.XViD-FlexGet'}
- {title: 'Q2Test.S01E01.DSR.XViD-FlexGet'}
series:
- Q2Test:
quality: 720p
min_quality:
mock:
- {title: 'MinQTest.S01E01.HDTV.XViD-FlexGet'}
- {title: 'MinQTest.S01E01.PDTV.XViD-FlexGet'}
- {title: 'MinQTest.S01E01.DSR.XViD-FlexGet'}
- {title: 'MinQTest.S01E01.1080p.XViD-FlexGet'}
- {title: 'MinQTest.S01E01.720p.XViD-FlexGet'}
series:
- MinQTest:
quality: ">720p"
max_quality:
mock:
- {title: 'MaxQTest.S01E01.HDTV.XViD-FlexGet'}
- {title: 'MaxQTest.S01E01.PDTV.XViD-FlexGet'}
- {title: 'MaxQTest.S01E01.DSR.XViD-FlexGet'}
- {title: 'MaxQTest.S01E01.1080p.XViD-FlexGet'}
- {title: 'MaxQTest.S01E01.720p.XViD-FlexGet'}
- {title: 'MaxQTest.S01E01.720p.bluray-FlexGet'}
series:
- MaxQTest:
quality: "<720p <=HDTV"
min_max_quality:
mock:
- {title: 'MinMaxQTest.S01E01.HDTV.XViD-FlexGet'}
- {title: 'MinMaxQTest.S01E01.PDTV.XViD-FlexGet'}
- {title: 'MinMaxQTest.S01E01.DSR.XViD-FlexGet'}
- {title: 'MinMaxQTest.S01E01.720p.XViD-FlexGet'}
- {title: 'MinMaxQTest.S01E01.HR.XViD-FlexGet'}
- {title: 'MinMaxQTest.S01E01.1080p.XViD-FlexGet'}
series:
- MinMaxQTest:
quality: 480p-hr
max_unknown_quality:
mock:
- {title: 'MaxUnknownQTest.S01E01.XViD-FlexGet'}
series:
- MaxUnknownQTest:
quality: "<=hdtv"
quality_from_group:
mock:
- {title: 'GroupQual.S01E01.HDTV.XViD-FlexGet'}
- {title: 'GroupQual.S01E01.PDTV.XViD-FlexGet'}
- {title: 'GroupQual.S01E01.DSR.XViD-FlexGet'}
- {title: 'GroupQual.S01E01.1080p.XViD-FlexGet'}
- {title: 'GroupQual.S01E01.720p.XViD-FlexGet'}
- {title: 'Other.S01E01.hdtv.dd5.1.XViD-FlexGet'}
- {title: 'Other.S01E01.720p.hdtv.XViD-FlexGet'}
series:
720P:
- GroupQual
# Test that an integer group name doesn't cause an exception.
1080:
- Test
hdtv <hr !dd5.1:
- Other
quality_in_series_name:
mock:
- title: my 720p show S01E01
- title: my 720p show S01E02 720p
series:
- my 720p show:
quality: '<720p'
"""
def test_exact_quality(self, execute_task):
"""Series plugin: choose by quality"""
task = execute_task('exact_quality')
assert task.find_entry('accepted', title='QTest.S01E01.720p.XViD-FlexGet'), \
'720p should have been accepted'
assert len(task.accepted) == 1, 'should have accepted only one'
def test_quality_fail(self, execute_task):
task = execute_task('quality_fail')
assert not task.accepted, 'No qualities should have matched'
def test_min_quality(self, execute_task):
"""Series plugin: min_quality"""
task = execute_task('min_quality')
assert task.find_entry('accepted', title='MinQTest.S01E01.1080p.XViD-FlexGet'), \
'MinQTest.S01E01.1080p.XViD-FlexGet should have been accepted'
assert len(task.accepted) == 1, 'should have accepted only one'
def test_max_quality(self, execute_task):
"""Series plugin: max_quality"""
task = execute_task('max_quality')
assert task.find_entry('accepted', title='MaxQTest.S01E01.HDTV.XViD-FlexGet'), \
'MaxQTest.S01E01.HDTV.XViD-FlexGet should have been accepted'
assert len(task.accepted) == 1, 'should have accepted only one'
def test_min_max_quality(self, execute_task):
"""Series plugin: min_quality with max_quality"""
task = execute_task('min_max_quality')
assert task.find_entry('accepted', title='MinMaxQTest.S01E01.HR.XViD-FlexGet'), \
'MinMaxQTest.S01E01.HR.XViD-FlexGet should have been accepted'
assert len(task.accepted) == 1, 'should have accepted only one'
def test_max_unknown_quality(self, execute_task):
"""Series plugin: max quality with unknown quality"""
task = execute_task('max_unknown_quality')
assert len(task.accepted) == 1, 'should have accepted'
def test_group_quality(self, execute_task):
"""Series plugin: quality from group name"""
task = execute_task('quality_from_group')
assert task.find_entry('accepted', title='GroupQual.S01E01.720p.XViD-FlexGet'), \
'GroupQual.S01E01.720p.XViD-FlexGet should have been accepted'
        assert len(task.accepted) == 1, 'should have accepted only one (no entries should pass for series `other`)'
def test_quality_in_series_name(self, execute_task):
"""Make sure quality in title does not get parsed as quality"""
task = execute_task('quality_in_series_name')
assert task.find_entry('accepted', title='my 720p show S01E01'), \
'quality in title should not have been parsed'
assert len(task.accepted) == 1, 'should not have accepted 720p entry'
class TestDatabase(object):
config = """
templates:
global:
parsing:
series: {{parser}}
series:
- some series
- progress
tasks:
test_1:
mock:
- {title: 'Some.Series.S01E20.720p.XViD-FlexGet'}
test_2:
mock:
- {title: 'Some.Series.S01E20.720p.XViD-DoppelGanger'}
progress_1:
mock:
- {title: 'Progress.S01E20.720p-FlexGet'}
- {title: 'Progress.S01E20.HDTV-FlexGet'}
progress_2:
mock:
- {title: 'Progress.S01E20.720p.Another-FlexGet'}
- {title: 'Progress.S01E20.HDTV-Another-FlexGet'}
"""
def test_database(self, execute_task):
"""Series plugin: simple database"""
task = execute_task('test_1')
task = execute_task('test_2')
assert task.find_entry('rejected', title='Some.Series.S01E20.720p.XViD-DoppelGanger'), \
'failed basic download remembering'
def test_doppelgangers(self, execute_task):
"""Series plugin: doppelganger releases (dupes)"""
task = execute_task('progress_1')
assert task.find_entry('accepted', title='Progress.S01E20.720p-FlexGet'), \
'best quality not accepted'
# should not accept anything
task = execute_task('progress_1')
assert not task.accepted, 'repeated execution accepted'
# introduce new doppelgangers
task = execute_task('progress_2')
assert not task.accepted, 'doppelgangers accepted'
class TestFilterSeries(object):
config = """
templates:
global:
parsing:
series: {{parser}}
tasks:
test:
mock:
- {title: 'Some.Series.S01E20.720p.XViD-FlexGet'}
- {title: 'Another.Series.S01E20.720p.XViD-FlexGet'}
- {title: 'Another.Series.S01E21.1080p.H264-FlexGet'}
- {title: 'Date.Series.10-11-2008.XViD'}
- {title: 'Date.Series.10.12.2008.XViD'}
- {title: 'Date.Series.2008-10-13.XViD'}
- {title: 'Date.Series.10.14.09.XViD'}
- {title: 'Date Series 2010 11 17 XViD'}
- {title: 'Useless title', filename: 'Filename.Series.S01E26.XViD'}
- {title: 'Empty.Description.S01E22.XViD', description: ''}
# test chaining
regexp:
reject:
- 1080p
series:
- another series
- date series
- filename series
- empty description
- (some) series
metainfo_series_override:
metainfo_series: yes
mock:
- {title: 'Test.Series.with.extra.crap.S01E02.PDTV.XViD-FlexGet'}
- {title: 'Other.Show.with.extra.crap.S02E01.PDTV.XViD-FlexGet'}
series:
- Test Series
test_all_series_mode:
mock:
- {title: 'Test.Series.S01E02.PDTV.XViD-FlexGet'}
- {title: 'Test Series - 1x03 - PDTV XViD-FlexGet'}
- {title: 'Other.Show.S02E01.PDTV.XViD-FlexGet'}
- {title: 'other show season 2 episode 2'}
- {title: 'Date.Show.03-29-2012.HDTV.XViD-FlexGet'}
all_series: yes
test_alternate_name:
mock:
- title: The.Show.S01E01
- title: Other.Name.S01E02
- title: many.names.S01E01
- title: name.1.S01E02
- title: name.2.S01E03
- title: paren.title.2013.S01E01
series:
- The Show:
alternate_name: Other Name
- many names:
alternate_name:
- name 1
- name 2
- paren title (US):
alternate_name: paren title 2013
test_input_order_preserved:
series:
- Some Show
"""
def test_smoke(self, execute_task):
"""Series plugin: test several standard features"""
task = execute_task('test')
# normal passing
assert task.find_entry(title='Another.Series.S01E20.720p.XViD-FlexGet'), \
'Another.Series.S01E20.720p.XViD-FlexGet should have passed'
# series with brackets
assert task.find_entry('accepted', title='Some.Series.S01E20.720p.XViD-FlexGet'), \
'Some.Series.S01E20.720p.XViD-FlexGet should have been accepted'
# date formats
df = ['Date.Series.10-11-2008.XViD', 'Date.Series.10.12.2008.XViD', 'Date Series 2010 11 17 XViD',
'Date.Series.2008-10-13.XViD', 'Date.Series.10.14.09.XViD']
for d in df:
entry = task.find_entry(title=d)
assert entry, 'Date format did not match %s' % d
assert 'series_parser' in entry, 'series_parser missing from %s' % d
assert entry['series_parser'].id_type == 'date', '%s did not return three groups for dates' % d
# parse from filename
assert task.find_entry(filename='Filename.Series.S01E26.XViD'), 'Filename parsing failed'
# empty description
assert task.find_entry(title='Empty.Description.S01E22.XViD'), 'Empty Description failed'
# chaining with regexp plugin
assert task.find_entry('rejected', title='Another.Series.S01E21.1080p.H264-FlexGet'), \
'regexp chaining'
def test_metainfo_series_override(self, execute_task):
"""Series plugin: override metainfo_series"""
task = execute_task('metainfo_series_override')
# Make sure the metainfo_series plugin is working first
entry = task.find_entry('entries', title='Other.Show.with.extra.crap.S02E01.PDTV.XViD-FlexGet')
assert entry['series_guessed'], 'series should have been guessed'
assert entry['series_name'] == entry['series_parser'].name == 'Other Show With Extra Crap', \
'metainfo_series is not running'
# Make sure the good series data overrode metainfo data for the listed series
entry = task.find_entry('accepted', title='Test.Series.with.extra.crap.S01E02.PDTV.XViD-FlexGet')
assert not entry.get('series_guessed'), 'series plugin should override series_guessed'
assert entry['series_name'] == entry['series_parser'].name == 'Test Series', \
'Series name should be \'Test Series\', was: entry: %s, parser: %s' % (
entry['series_name'], entry['series_parser'].name)
def test_all_series_mode(self, execute_task):
"""Series plugin: test all option"""
task = execute_task('test_all_series_mode')
assert task.find_entry('accepted', title='Test.Series.S01E02.PDTV.XViD-FlexGet')
task.find_entry('accepted', title='Test Series - 1x03 - PDTV XViD-FlexGet')
entry = task.find_entry('accepted', title='Test Series - 1x03 - PDTV XViD-FlexGet')
assert entry
assert entry.get('series_name') == 'Test Series'
entry = task.find_entry('accepted', title='Other.Show.S02E01.PDTV.XViD-FlexGet')
assert entry.get('series_guessed')
entry2 = task.find_entry('accepted', title='other show season 2 episode 2')
# Make sure case is normalized so series are marked with the same name no matter the case in the title
assert entry.get('series_name') == entry2.get(
'series_name') == 'Other Show', 'Series names should be in title case'
entry = task.find_entry('accepted', title='Date.Show.03-29-2012.HDTV.XViD-FlexGet')
assert entry.get('series_guessed')
assert entry.get('series_name') == 'Date Show'
def test_alternate_name(self, execute_task):
task = execute_task('test_alternate_name')
assert all(e.accepted for e in task.all_entries), 'All releases should have matched a show'
@pytest.mark.parametrize('reverse', [False, True])
def test_input_order_preserved(self, manager, execute_task, reverse):
"""If multiple versions of an episode are acceptable, make sure the first one is accepted."""
entries = [
Entry(title='Some Show S01E01 720p proper', url='http://a'),
Entry(title='Some Show S01E01 1080p', url='http://b')
]
if reverse:
entries.reverse()
task = execute_task('test_input_order_preserved', options={'inject': entries})
assert task.accepted[0] == entries[0], 'first entry should have been accepted'
class TestEpisodeAdvancement(object):
config = """
templates:
global:
parsing:
series: {{parser}}
tasks:
test_backwards_1:
mock:
- {title: 'backwards s02e12'}
- {title: 'backwards s02e10'}
series:
- backwards
test_backwards_2:
mock:
- {title: 'backwards s02e01'}
series:
- backwards
test_backwards_3:
mock:
- {title: 'backwards s01e01'}
series:
- backwards
test_backwards_okay_1:
mock:
- {title: 'backwards s01e02'}
series:
- backwards:
tracking: backfill
test_backwards_okay_2:
mock:
- {title: 'backwards s01e03'}
series:
- backwards:
tracking: no
test_forwards_1:
mock:
- {title: 'forwards s01e01'}
series:
- forwards
test_forwards_2:
mock:
- {title: 'forwards s02e01'}
series:
- forwards
test_forwards_3:
mock:
- {title: 'forwards s03e01'}
series:
- forwards
test_forwards_4:
mock:
- {title: 'forwards s04e02'}
series:
- forwards
test_forwards_5:
mock:
- {title: 'forwards s05e01'}
series:
- forwards
test_forwards_okay_1:
mock:
- {title: 'forwards s05e01'}
series:
- forwards:
tracking: no
test_unordered:
mock:
- {title: 'zzz s01e05'}
- {title: 'zzz s01e06'}
- {title: 'zzz s01e07'}
- {title: 'zzz s01e08'}
- {title: 'zzz s01e09'}
- {title: 'zzz s01e10'}
- {title: 'zzz s01e15'}
- {title: 'zzz s01e14'}
- {title: 'zzz s01e13'}
- {title: 'zzz s01e12'}
- {title: 'zzz s01e11'}
- {title: 'zzz s01e01'}
series:
- zzz
test_seq1:
mock:
- title: seq 05
series:
- seq
test_seq2:
mock:
- title: seq 06
series:
- seq
test_seq3:
mock:
- title: seq 10
series:
- seq
test_seq4:
mock:
- title: seq 01
series:
- seq
"""
def test_backwards(self, execute_task):
"""Series plugin: episode advancement (backwards)"""
task = execute_task('test_backwards_1')
assert task.find_entry('accepted', title='backwards s02e12'), \
'backwards s02e12 should have been accepted'
assert task.find_entry('accepted', title='backwards s02e10'), \
'backwards s02e10 should have been accepted within grace margin'
task = execute_task('test_backwards_2')
assert task.find_entry('accepted', title='backwards s02e01'), \
'backwards s02e01 should have been accepted, in current season'
task = execute_task('test_backwards_3')
assert task.find_entry('rejected', title='backwards s01e01'), \
'backwards s01e01 should have been rejected, in previous season'
task = execute_task('test_backwards_okay_1')
assert task.find_entry('accepted', title='backwards s01e02'), \
'backwards s01e01 should have been accepted, backfill enabled'
task = execute_task('test_backwards_okay_2')
assert task.find_entry('accepted', title='backwards s01e03'), \
'backwards s01e01 should have been accepted, tracking off'
def test_forwards(self, execute_task):
"""Series plugin: episode advancement (future)"""
task = execute_task('test_forwards_1')
assert task.find_entry('accepted', title='forwards s01e01'), \
'forwards s01e01 should have been accepted'
task = execute_task('test_forwards_2')
assert task.find_entry('accepted', title='forwards s02e01'), \
'forwards s02e01 should have been accepted'
task = execute_task('test_forwards_3')
assert task.find_entry('accepted', title='forwards s03e01'), \
'forwards s03e01 should have been accepted'
task = execute_task('test_forwards_4')
assert task.find_entry('rejected', title='forwards s04e02'), \
'forwards s04e02 should have been rejected'
task = execute_task('test_forwards_5')
assert task.find_entry('rejected', title='forwards s05e01'), \
'forwards s05e01 should have been rejected'
task = execute_task('test_forwards_okay_1')
assert task.find_entry('accepted', title='forwards s05e01'), \
'forwards s05e01 should have been accepted with tracking turned off'
def test_unordered(self, execute_task):
"""Series plugin: unordered episode advancement"""
task = execute_task('test_unordered')
assert len(task.accepted) == 12, \
'not everyone was accepted'
def test_sequence(self, execute_task):
# First should be accepted
task = execute_task('test_seq1')
entry = task.find_entry('accepted', title='seq 05')
assert entry['series_id'] == 5
# Next in sequence should be accepted
task = execute_task('test_seq2')
entry = task.find_entry('accepted', title='seq 06')
assert entry['series_id'] == 6
# Should be too far in the future
task = execute_task('test_seq3')
entry = task.find_entry(title='seq 10')
assert entry not in task.accepted, 'Should have been too far in future'
# Should be too far in the past
task = execute_task('test_seq4')
entry = task.find_entry(title='seq 01')
assert entry not in task.accepted, 'Should have been too far in the past'
class TestFilterSeriesPriority(object):
config = """
templates:
global:
parsing:
series: {{parser}}
tasks:
test:
mock:
- {title: 'foobar 720p s01e01'}
- {title: 'foobar hdtv s01e01'}
regexp:
reject:
- 720p
series:
- foobar
"""
def test_priorities(self, execute_task):
"""Series plugin: regexp plugin is able to reject before series plugin"""
task = execute_task('test')
assert task.find_entry('rejected', title='foobar 720p s01e01'), \
'foobar 720p s01e01 should have been rejected'
assert task.find_entry('accepted', title='foobar hdtv s01e01'), \
'foobar hdtv s01e01 is not accepted'
class TestPropers(object):
config = """
templates:
global:
parsing:
series: {{parser}}
# prevents seen from rejecting on second execution,
# we want to see that series is able to reject
disable: builtins
series:
- test
- foobar
- asfd:
quality: HR-1080p
- V
- tftest:
propers: 3 hours
- notest:
propers: no
tasks:
propers_1:
mock:
- {title: 'Test.S01E01.720p-FlexGet'}
# introduce proper, should be accepted
propers_2:
mock:
- {title: 'Test.S01E01.720p.Proper-FlexGet'}
# introduce non-proper, should not be downloaded
propers_3:
mock:
- {title: 'Test.S01E01.FlexGet'}
# introduce proper at the same time, should nuke non-proper and get proper
proper_at_first:
mock:
- {title: 'Foobar.S01E01.720p.FlexGet'}
- {title: 'Foobar.S01E01.720p.proper.FlexGet'}
# test a lot of propers at once
lot_propers:
mock:
- {title: 'V.2009.S01E01.PROPER.HDTV.A'}
- {title: 'V.2009.S01E01.PROPER.HDTV.B'}
- {title: 'V.2009.S01E01.PROPER.HDTV.C'}
diff_quality_1:
mock:
- {title: 'Test.S01E02.720p-FlexGet'}
# low quality proper, should not be accepted
diff_quality_2:
mock:
- {title: 'Test.S01E02.HDTV.Proper-FlexGet'}
# min + max quality with propers
min_max_quality_1:
mock:
- {title: 'asfd.S01E01.720p-FlexGet'}
min_max_quality_2:
mock:
- {title: 'asfd.S01E01.720p.Proper-FlexGet'}
proper_timeframe_1:
mock:
- {title: 'TFTest.S01E01.720p-FlexGet'}
proper_timeframe_2:
mock:
- {title: 'TFTest.S01E01.720p.proper-FlexGet'}
no_propers_1:
mock:
- {title: 'NoTest.S01E01.720p-FlexGet'}
no_propers_2:
mock:
- {title: 'NoTest.S01E01.720p.proper-FlexGet'}
proper_upgrade_1:
mock:
- {title: 'Test.S02E01.hdtv.proper'}
proper_upgrade_2:
mock:
- {title: 'Test.S02E01.hdtv.real.proper'}
anime_proper_1:
mock:
- title: test 04v0 hdtv
anime_proper_2:
mock:
- title: test 04 hdtv
fastsub_proper_1:
mock:
- title: test s01e01 Fastsub hdtv
fastsub_proper_2:
mock:
- title: test s01e01 Fastsub repack hdtv
fastsub_proper_3:
mock:
- title: test s01e01 hdtv
fastsub_proper_4:
mock:
- title: test s01e01 proper hdtv
"""
def test_propers_timeframe(self, execute_task):
"""Series plugin: propers timeframe"""
task = execute_task('proper_timeframe_1')
assert task.find_entry('accepted', title='TFTest.S01E01.720p-FlexGet'), \
'Did not accept before timeframe'
# let 6 hours pass
age_series(hours=6)
task = execute_task('proper_timeframe_2')
assert task.find_entry('rejected', title='TFTest.S01E01.720p.proper-FlexGet'), \
'Did not reject after proper timeframe'
def test_no_propers(self, execute_task):
"""Series plugin: no propers at all"""
task = execute_task('no_propers_1')
assert len(task.accepted) == 1, 'broken badly'
task = execute_task('no_propers_2')
assert len(task.rejected) == 1, 'accepted proper'
def test_min_max_propers(self, execute_task):
"""Series plugin: min max propers"""
task = execute_task('min_max_quality_1')
assert len(task.accepted) == 1, 'uhh, broken badly'
task = execute_task('min_max_quality_2')
assert len(task.accepted) == 1, 'should have accepted proper'
def test_lot_propers(self, execute_task):
"""Series plugin: proper flood"""
task = execute_task('lot_propers')
assert len(task.accepted) == 1, 'should have accepted (only) one of the propers'
def test_diff_quality_propers(self, execute_task):
"""Series plugin: proper in different/wrong quality"""
task = execute_task('diff_quality_1')
assert len(task.accepted) == 1
task = execute_task('diff_quality_2')
assert len(task.accepted) == 0, 'should not have accepted lower quality proper'
def test_propers(self, execute_task):
"""Series plugin: proper accepted after episode is downloaded"""
# start with normal download ...
task = execute_task('propers_1')
assert task.find_entry('accepted', title='Test.S01E01.720p-FlexGet'), \
'Test.S01E01-FlexGet should have been accepted'
# rejects downloaded
task = execute_task('propers_1')
assert task.find_entry('rejected', title='Test.S01E01.720p-FlexGet'), \
'Test.S01E01-FlexGet should have been rejected'
# accepts proper
task = execute_task('propers_2')
assert task.find_entry('accepted', title='Test.S01E01.720p.Proper-FlexGet'), \
'new undownloaded proper should have been accepted'
# reject downloaded proper
task = execute_task('propers_2')
assert task.find_entry('rejected', title='Test.S01E01.720p.Proper-FlexGet'), \
'downloaded proper should have been rejected'
# reject episode that has been downloaded normally and with proper
task = execute_task('propers_3')
assert task.find_entry('rejected', title='Test.S01E01.FlexGet'), \
'Test.S01E01.FlexGet should have been rejected'
def test_proper_available(self, execute_task):
"""Series plugin: proper available immediately"""
task = execute_task('proper_at_first')
assert task.find_entry('accepted', title='Foobar.S01E01.720p.proper.FlexGet'), \
'Foobar.S01E01.720p.proper.FlexGet should have been accepted'
def test_proper_upgrade(self, execute_task):
"""Series plugin: real proper after proper"""
task = execute_task('proper_upgrade_1')
assert task.find_entry('accepted', title='Test.S02E01.hdtv.proper')
task = execute_task('proper_upgrade_2')
assert task.find_entry('accepted', title='Test.S02E01.hdtv.real.proper')
def test_anime_proper(self, execute_task):
task = execute_task('anime_proper_1')
        assert task.accepted, 'ep should have been accepted'
task = execute_task('anime_proper_2')
assert task.accepted, 'proper ep should have been accepted'
def test_fastsub_proper(self, execute_task):
task = execute_task('fastsub_proper_1')
        assert task.accepted, 'ep should have been accepted'
task = execute_task('fastsub_proper_2')
assert task.accepted, 'proper ep should have been accepted'
task = execute_task('fastsub_proper_3')
assert task.accepted, 'proper ep should have been accepted'
task = execute_task('fastsub_proper_4')
assert task.accepted, 'proper ep should have been accepted'
class TestSimilarNames(object):
# hmm, not very good way to test this .. seriesparser should be tested alone?
config = """
templates:
global:
parsing:
series: {{parser}}
tasks:
test:
mock:
- {title: 'FooBar.S03E01.DSR-FlexGet'}
- {title: 'FooBar: FirstAlt.S02E01.DSR-FlexGet'}
- {title: 'FooBar: SecondAlt.S01E01.DSR-FlexGet'}
series:
- FooBar
- 'FooBar: FirstAlt'
- 'FooBar: SecondAlt'
test_ambiguous:
mock:
- title: Foo.2.2
series:
- Foo:
identified_by: sequence
- Foo 2:
identified_by: sequence
"""
def test_names(self, execute_task):
"""Series plugin: similar namings"""
task = execute_task('test')
assert task.find_entry('accepted', title='FooBar.S03E01.DSR-FlexGet'), 'Standard failed?'
assert task.find_entry('accepted', title='FooBar: FirstAlt.S02E01.DSR-FlexGet'), 'FirstAlt failed'
assert task.find_entry('accepted', title='FooBar: SecondAlt.S01E01.DSR-FlexGet'), 'SecondAlt failed'
def test_ambiguous(self, execute_task):
task = execute_task('test_ambiguous')
# In the event of ambiguous match, more specific one should be chosen
assert task.find_entry('accepted', title='Foo.2.2')['series_name'] == 'Foo 2'
class TestDuplicates(object):
config = """
templates:
global:
parsing:
series: {{parser}}
# just cleans log a bit ..
disable:
- seen
tasks:
test_dupes:
mock:
- {title: 'Foo.2009.S02E04.HDTV.XviD-2HD[FlexGet]'}
- {title: 'Foo.2009.S02E04.HDTV.XviD-2HD[ASDF]'}
series:
- Foo 2009
test_1:
mock:
- {title: 'Foo.Bar.S02E04.HDTV.XviD-2HD[FlexGet]'}
- {title: 'Foo.Bar.S02E04.HDTV.XviD-2HD[ASDF]'}
series:
- foo bar
test_2:
mock:
- {title: 'Foo.Bar.S02E04.XviD-2HD[ASDF]'}
- {title: 'Foo.Bar.S02E04.HDTV.720p.XviD-2HD[FlexGet]'}
- {title: 'Foo.Bar.S02E04.DSRIP.XviD-2HD[ASDF]'}
- {title: 'Foo.Bar.S02E04.HDTV.1080p.XviD-2HD[ASDF]'}
- {title: 'Foo.Bar.S02E03.HDTV.XviD-FlexGet'}
- {title: 'Foo.Bar.S02E05.720p.HDTV.XviD-YYY'}
series:
- foo bar
test_true_dupes:
mock:
- {title: 'Dupe.S02E04.HDTV.XviD-FlexGet'}
- {title: 'Dupe.S02E04.HDTV.XviD-FlexGet'}
- {title: 'Dupe.S02E04.HDTV.XviD-FlexGet'}
series:
- dupe
"""
def test_dupes(self, execute_task):
"""Series plugin: dupes with same quality"""
task = execute_task('test_dupes')
assert len(task.accepted) == 1, 'accepted both'
def test_true_dupes(self, execute_task):
"""Series plugin: true duplicate items"""
task = execute_task('test_true_dupes')
assert len(task.accepted) == 1, 'should have accepted (only) one'
def test_downloaded(self, execute_task):
"""Series plugin: multiple downloaded and new episodes are handled correctly"""
task = execute_task('test_1')
task = execute_task('test_2')
# these should be accepted
accepted = ['Foo.Bar.S02E03.HDTV.XviD-FlexGet', 'Foo.Bar.S02E05.720p.HDTV.XviD-YYY']
for item in accepted:
assert task.find_entry('accepted', title=item), \
'%s should have been accepted' % item
# these should be rejected
rejected = ['Foo.Bar.S02E04.XviD-2HD[ASDF]', 'Foo.Bar.S02E04.HDTV.720p.XviD-2HD[FlexGet]',
'Foo.Bar.S02E04.DSRIP.XviD-2HD[ASDF]', 'Foo.Bar.S02E04.HDTV.1080p.XviD-2HD[ASDF]']
for item in rejected:
assert task.find_entry('rejected', title=item), \
'%s should have been rejected' % item
class TestQualities(object):
config = """
templates:
global:
parsing:
series: {{parser}}
disable: builtins
series:
- FooBar:
qualities:
- SDTV
- 720p
- 1080p
- FooBaz:
upgrade: yes
qualities:
- hdtv
- hr
- 720p
- FooBum:
quality: 720p-1080i
upgrade: yes
- FooD:
target: 720p
timeframe: 0 hours
upgrade: yes
tasks:
test_1:
mock:
- {title: 'FooBar.S01E01.PDTV-FlexGet'}
- {title: 'FooBar.S01E01.1080p-FlexGet'}
- {title: 'FooBar.S01E01.HR-FlexGet'}
test_2:
mock:
- {title: 'FooBar.S01E01.720p-FlexGet'}
propers_1:
mock:
- {title: 'FooBar.S01E02.720p-FlexGet'}
propers_2:
mock:
- {title: 'FooBar.S01E02.720p.Proper-FlexGet'}
upgrade_1:
mock:
- {title: 'FooBaz.S01E02.pdtv-FlexGet'}
- {title: 'FooBaz.S01E02.HR-FlexGet'}
upgrade_2:
mock:
- {title: 'FooBaz.S01E02.720p-FlexGet'}
- {title: 'FooBaz.S01E02.1080p-FlexGet'}
upgrade_3:
mock:
- {title: 'FooBaz.S01E02.hdtv-FlexGet'}
- {title: 'FooBaz.S01E02.720p rc-FlexGet'}
quality_upgrade_1:
mock:
- title: FooBum.S03E01.1080p # too high
- title: FooBum.S03E01.hdtv # too low
- title: FooBum.S03E01.720p # in range
quality_upgrade_2:
mock:
- title: FooBum.S03E01.1080i # should be upgraded to
- title: FooBum.S03E01.720p-ver2 # Duplicate ep
target_1:
mock:
- title: Food.S06E11.hdtv
target_2:
mock:
- title: Food.S06E11.1080p
- title: Food.S06E11.720p
"""
def test_qualities(self, execute_task):
"""Series plugin: qualities"""
task = execute_task('test_1')
assert task.find_entry('accepted', title='FooBar.S01E01.PDTV-FlexGet'), \
            'Didn\'t accept FooBar.S01E01.PDTV-FlexGet'
assert task.find_entry('accepted', title='FooBar.S01E01.1080p-FlexGet'), \
            'Didn\'t accept FooBar.S01E01.1080p-FlexGet'
assert not task.find_entry('accepted', title='FooBar.S01E01.HR-FlexGet'), \
'Accepted FooBar.S01E01.HR-FlexGet'
task = execute_task('test_2')
assert task.find_entry('accepted', title='FooBar.S01E01.720p-FlexGet'), \
            'Didn\'t accept FooBar.S01E01.720p-FlexGet'
# test that it rejects them afterwards
task = execute_task('test_1')
assert task.find_entry('rejected', title='FooBar.S01E01.PDTV-FlexGet'), \
'Didn\'t reject FooBar.S01E01.PDTV-FlexGet'
assert task.find_entry('rejected', title='FooBar.S01E01.1080p-FlexGet'), \
'Didn\'t reject FooBar.S01E01.1080p-FlexGet'
assert not task.find_entry('accepted', title='FooBar.S01E01.HR-FlexGet'), \
'Accepted FooBar.S01E01.HR-FlexGet'
def test_propers(self, execute_task):
"""Series plugin: qualities + propers"""
task = execute_task('propers_1')
assert task.accepted
task = execute_task('propers_2')
assert task.accepted, 'proper not accepted'
task = execute_task('propers_2')
assert not task.accepted, 'proper accepted again'
def test_qualities_upgrade(self, execute_task):
task = execute_task('upgrade_1')
assert task.find_entry('accepted', title='FooBaz.S01E02.HR-FlexGet'), 'HR quality should be accepted'
assert len(task.accepted) == 1, 'Only best quality should be accepted'
task = execute_task('upgrade_2')
assert task.find_entry('accepted', title='FooBaz.S01E02.720p-FlexGet'), '720p quality should be accepted'
assert len(task.accepted) == 1, 'Only best quality should be accepted'
task = execute_task('upgrade_3')
assert not task.accepted, 'Should not have accepted worse qualities'
def test_quality_upgrade(self, execute_task):
task = execute_task('quality_upgrade_1')
assert len(task.accepted) == 1, 'Only one ep should have passed quality filter'
assert task.find_entry('accepted', title='FooBum.S03E01.720p')
task = execute_task('quality_upgrade_2')
assert len(task.accepted) == 1, 'one ep should be valid upgrade'
assert task.find_entry('accepted', title='FooBum.S03E01.1080i')
def test_target_upgrade(self, execute_task):
task = execute_task('target_1')
assert len(task.accepted) == 1, 'Only one ep should have been grabbed'
assert task.find_entry('accepted', title='Food.S06E11.hdtv')
task = execute_task('target_2')
assert len(task.accepted) == 1, 'one ep should be valid upgrade'
assert task.find_entry('accepted', title='Food.S06E11.720p'), 'Should upgrade to `target`'
class TestIdioticNumbering(object):
config = """
templates:
global:
parsing:
series: {{parser}}
series:
- FooBar:
identified_by: ep
tasks:
test_1:
mock:
- {title: 'FooBar.S01E01.PDTV-FlexGet'}
test_2:
mock:
- {title: 'FooBar.102.PDTV-FlexGet'}
"""
def test_idiotic(self, execute_task):
"""Series plugin: idiotic numbering scheme"""
task = execute_task('test_1')
task = execute_task('test_2')
entry = task.find_entry(title='FooBar.102.PDTV-FlexGet')
assert entry, 'entry not found?'
assert entry['series_season'] == 1, 'season not detected'
assert entry['series_episode'] == 2, 'episode not detected'
class TestNormalization(object):
config = """
templates:
global:
parsing:
series: {{parser}}
disable: [seen]
tasks:
test_1:
mock:
- {title: 'FooBar.S01E01.PDTV-FlexGet'}
series:
- FOOBAR
test_2:
mock:
- {title: 'FooBar.S01E01.PDTV-aoeu'}
series:
- foobar
test_3:
mock:
- title: Foo bar & co 2012.s01e01.sdtv.a
series:
- foo bar & co 2012
test_4:
mock:
- title: Foo bar & co 2012.s01e01.sdtv.b
series:
- Foo/Bar and Co. (2012)
"""
def test_capitalization(self, execute_task):
"""Series plugin: configuration capitalization"""
task = execute_task('test_1')
assert task.find_entry('accepted', title='FooBar.S01E01.PDTV-FlexGet')
task = execute_task('test_2')
assert task.find_entry('rejected', title='FooBar.S01E01.PDTV-aoeu')
def test_normalization(self, execute_task):
task = execute_task('test_3')
assert task.find_entry('accepted', title='Foo bar & co 2012.s01e01.sdtv.a')
task = execute_task('test_4')
assert task.find_entry('rejected', title='Foo bar & co 2012.s01e01.sdtv.b')
class TestMixedNumbering(object):
config = """
templates:
global:
parsing:
series: {{parser}}
series:
- FooBar:
identified_by: ep
tasks:
test_1:
mock:
- {title: 'FooBar.S03E07.PDTV-FlexGet'}
test_2:
mock:
- {title: 'FooBar.0307.PDTV-FlexGet'}
"""
def test_mixednumbering(self, execute_task):
"""Series plugin: Mixed series numbering"""
task = execute_task('test_1')
assert task.find_entry('accepted', title='FooBar.S03E07.PDTV-FlexGet')
task = execute_task('test_2')
assert task.find_entry('rejected', title='FooBar.0307.PDTV-FlexGet')
class TestExact(object):
config = """
templates:
global:
parsing:
series: {{parser}}
tasks:
auto:
mock:
- {title: 'ABC.MIAMI.S01E01.PDTV-FlexGet'}
- {title: 'ABC.S01E01.PDTV-FlexGet'}
- {title: 'ABC.LA.S01E01.PDTV-FlexGet'}
series:
- ABC
- ABC LA
- ABC Miami
name_regexp:
mock:
- title: show s09e05 hdtv
- title: show a s09e06 hdtv
series:
- show:
name_regexp: ^show
exact: yes
date:
mock:
- title: date show 04.01.2011 hdtv
- title: date show b 04.02.2011 hdtv
series:
- date show:
exact: yes
"""
def test_auto(self, execute_task):
"""Series plugin: auto enable exact"""
task = execute_task('auto')
assert task.find_entry('accepted', title='ABC.S01E01.PDTV-FlexGet')
assert task.find_entry('accepted', title='ABC.LA.S01E01.PDTV-FlexGet')
assert task.find_entry('accepted', title='ABC.MIAMI.S01E01.PDTV-FlexGet')
def test_with_name_regexp(self, execute_task):
task = execute_task('name_regexp')
assert task.find_entry('accepted', title='show s09e05 hdtv')
assert not task.find_entry('accepted', title='show a s09e06 hdtv')
def test_dated_show(self, execute_task):
task = execute_task('date')
assert task.find_entry('accepted', title='date show 04.01.2011 hdtv')
assert not task.find_entry('accepted', title='date show b 04.02.2011 hdtv')
class TestTimeframe(object):
config = """
templates:
global:
parsing:
series: {{parser}}
series:
- test:
timeframe: 5 hours
target: 720p
tasks:
test_no_waiting:
mock:
- {title: 'Test.S01E01.720p-FlexGet'}
test_stop_waiting_1:
mock:
- {title: 'Test.S01E02.HDTV-FlexGet'}
test_stop_waiting_2:
mock:
- {title: 'Test.S01E02.720p-FlexGet'}
test_proper_afterwards:
mock:
- {title: 'Test.S01E02.720p.Proper-FlexGet'}
test_expires:
mock:
- {title: 'Test.S01E03.pdtv-FlexGet'}
test_min_max_fail:
series:
- mm test:
timeframe: 5 hours
target: 720p
quality: hdtv+ <=720p
mock:
- {title: 'MM Test.S01E02.pdtv-FlexGet'}
- {title: 'MM Test.S01E02.1080p-FlexGet'}
test_min_max_pass:
series:
- mm test:
timeframe: 5 hours
target: 720p
quality: hdtv+ <=720p
mock:
- {title: 'MM Test.S01E02.pdtv-FlexGet'}
- {title: 'MM Test.S01E02.hdtv-FlexGet'}
- {title: 'MM Test.S01E02.1080p-FlexGet'}
test_qualities_fail:
series:
- q test:
timeframe: 5 hours
qualities:
- hdtv
- 1080p
mock:
- {title: 'Q Test.S01E02.pdtv-FlexGet'}
- {title: 'Q Test.S01E02.1080p-FlexGet'}
test_qualities_pass:
series:
- q test:
timeframe: 5 hours
qualities:
- sdtv
- 720p
mock:
- {title: 'Q Test.S01E02.1080p-FlexGet'}
test_with_quality_1:
series:
- q test:
timeframe: 5 hours
quality: hdtv+
target: 720p
mock:
- title: q test s01e01 pdtv 720p
test_with_quality_2:
series:
- q test:
timeframe: 5 hours
quality: hdtv+
target: 720p
mock:
- title: q test s01e01 hdtv
"""
def test_no_waiting(self, execute_task):
"""Series plugin: no timeframe waiting needed"""
task = execute_task('test_no_waiting')
assert task.find_entry('accepted', title='Test.S01E01.720p-FlexGet'), \
            '720p not accepted immediately'
def test_stop_waiting(self, execute_task):
"""Series plugin: timeframe quality appears, stop waiting, proper appears"""
task = execute_task('test_stop_waiting_1')
assert task.entries and not task.accepted
task = execute_task('test_stop_waiting_2')
assert task.find_entry('accepted', title='Test.S01E02.720p-FlexGet'), \
'720p should have caused stop waiting'
task = execute_task('test_proper_afterwards')
assert task.find_entry('accepted', title='Test.S01E02.720p.Proper-FlexGet'), \
'proper should have been accepted'
def test_expires(self, execute_task):
"""Series plugin: timeframe expires"""
# first execution should not accept anything
task = execute_task('test_expires')
assert not task.accepted
# let 3 hours pass
age_series(hours=3)
task = execute_task('test_expires')
assert not task.accepted, 'expired too soon'
# let another 3 hours pass, should expire now!
age_series(hours=6)
task = execute_task('test_expires')
assert task.accepted, 'timeframe didn\'t expire'
def test_min_max_fail(self, execute_task):
task = execute_task('test_min_max_fail')
assert not task.accepted
        # Let 6 hours pass; the timeframe should not even have been started, as pdtv doesn't meet min_quality
age_series(hours=6)
task = execute_task('test_min_max_fail')
assert task.entries and not task.accepted
def test_min_max_pass(self, execute_task):
task = execute_task('test_min_max_pass')
assert not task.accepted
# Let 6 hours pass, timeframe should expire and accept hdtv copy
age_series(hours=6)
task = execute_task('test_min_max_pass')
assert task.find_entry('accepted', title='MM Test.S01E02.hdtv-FlexGet')
assert len(task.accepted) == 1
def test_qualities_fail(self, execute_task):
task = execute_task('test_qualities_fail')
assert task.find_entry('accepted', title='Q Test.S01E02.1080p-FlexGet'), \
'should have accepted wanted quality'
assert len(task.accepted) == 1
        # Let 6 hours pass; the timeframe should not even have been started, as we already have one of our qualities
age_series(hours=6)
task = execute_task('test_qualities_fail')
assert task.entries and not task.accepted
def test_qualities_pass(self, execute_task):
task = execute_task('test_qualities_pass')
assert not task.accepted, 'None of the qualities should have matched'
# Let 6 hours pass, timeframe should expire and accept 1080p copy
age_series(hours=6)
task = execute_task('test_qualities_pass')
assert task.find_entry('accepted', title='Q Test.S01E02.1080p-FlexGet')
assert len(task.accepted) == 1
def test_with_quality(self, execute_task):
task = execute_task('test_with_quality_1')
assert not task.accepted, 'Entry does not pass quality'
age_series(hours=6)
# Entry from first test feed should not pass quality
task = execute_task('test_with_quality_1')
assert not task.accepted, 'Entry does not pass quality'
# Timeframe should not yet have started
task = execute_task('test_with_quality_2')
assert not task.accepted, 'Timeframe should not yet have passed'
age_series(hours=6)
task = execute_task('test_with_quality_2')
assert task.accepted, 'Timeframe should have passed'
class TestBacklog(object):
config = """
templates:
global:
parsing:
series: {{parser}}
tasks:
backlog:
mock:
- {title: 'Test.S01E01.hdtv-FlexGet'}
series:
- test: {timeframe: 6 hours}
"""
def testBacklog(self, manager, execute_task):
"""Series plugin: backlog"""
task = execute_task('backlog')
assert task.entries and not task.accepted, 'no entries at the start'
# simulate test going away from the task
del (manager.config['tasks']['backlog']['mock'])
age_series(hours=12)
task = execute_task('backlog')
assert task.accepted, 'backlog is not injecting episodes'
class TestManipulate(object):
"""Tests that it's possible to manipulate entries before they're parsed by series plugin"""
config = """
templates:
global:
parsing:
series: {{parser}}
tasks:
test_1:
mock:
- {title: 'PREFIX: Test.S01E01.hdtv-FlexGet'}
series:
- test
test_2:
mock:
- {title: 'PREFIX: Test.S01E01.hdtv-FlexGet'}
series:
- test
manipulate:
- title:
extract: '^PREFIX: (.*)'
"""
def testManipulate(self, execute_task):
"""Series plugin: test manipulation priority"""
# should not work with the prefix
task = execute_task('test_1')
assert not task.accepted, 'series accepted even with prefix?'
        assert not task.rejected, 'series rejected even with prefix?'
task = execute_task('test_2')
assert task.accepted, 'manipulate failed to pre-clean title'
class TestFromGroup(object):
config = """
templates:
global:
parsing:
series: {{parser}}
tasks:
test:
mock:
- {title: '[Ignored] Test 12'}
- {title: '[FlexGet] Test 12'}
- {title: 'Test.13.HDTV-Ignored'}
- {title: 'Test.13.HDTV-FlexGet'}
- {title: 'Test.14.HDTV-Name'}
- {title: 'Test :: h264 10-bit | Softsubs (FlexGet) | Episode 3'}
- {title: 'Test :: h264 10-bit | Softsubs (Ignore) | Episode 3'}
series:
- test: {from_group: [Name, FlexGet]}
"""
def test_from_group(self, execute_task):
"""Series plugin: test from_group"""
task = execute_task('test')
assert task.find_entry('accepted', title='[FlexGet] Test 12')
assert task.find_entry('accepted', title='Test.13.HDTV-FlexGet')
assert task.find_entry('accepted', title='Test.14.HDTV-Name')
assert task.find_entry('accepted', title='Test :: h264 10-bit | Softsubs (FlexGet) | Episode 3')
class TestBegin(object):
config = """
templates:
global:
parsing:
series: {{parser}}
eps:
mock:
- {title: 'WTest.S02E03.HDTV.XViD-FlexGet'}
- {title: 'W2Test.S02E03.HDTV.XViD-FlexGet'}
tasks:
season_id_test:
template: eps
series:
- WTest:
begin: S02
- W2Test:
begin: S03
before_ep_test:
template: eps
series:
- WTest:
begin: S02E05
- W2Test:
begin: S03E02
after_ep_test:
template: eps
series:
- WTest:
begin: S02E03
- W2Test:
begin: S02E01
before_seq_test:
mock:
- title: WTest.1.HDTV.XViD-FlexGet
- title: W2Test.13.HDTV.XViD-FlexGet
series:
- WTest:
begin: 2
- W2Test:
begin: 120
after_seq_test:
mock:
- title: WTest.2.HDTV.XViD-FlexGet
- title: W2Test.123.HDTV.XViD-FlexGet
series:
- WTest:
begin: 2
- W2Test:
begin: 120
before_date_test:
mock:
- title: WTest.2001.6.6.HDTV.XViD-FlexGet
- title: W2Test.12.30.2012.HDTV.XViD-FlexGet
series:
- WTest:
begin: '2009-05-05'
- W2Test:
begin: '2012-12-31'
after_date_test:
mock:
- title: WTest.2009.5.5.HDTV.XViD-FlexGet
- title: W2Test.1.1.2013.HDTV.XViD-FlexGet
series:
- WTest:
begin: '2009-05-05'
- W2Test:
begin: '2012-12-31'
test_advancement1:
mock:
- title: WTest.S01E01
series:
- WTest
test_advancement2:
mock:
- title: WTest.S03E01
series:
- WTest
test_advancement3:
mock:
- title: WTest.S03E01
series:
- WTest:
begin: S03E01
"""
def test_season_id(self, execute_task):
task = execute_task('season_id_test')
assert task.find_entry('accepted', title='WTest.S02E03.HDTV.XViD-FlexGet'), \
'Entry should have been accepted, it\'s after the begin episode'
assert task.find_entry('rejected', title='W2Test.S02E03.HDTV.XViD-FlexGet'), \
'Entry should have been rejected, it\'s before the begin episode'
def test_before_ep(self, execute_task):
task = execute_task('before_ep_test')
assert not task.accepted, 'No entries should have been accepted, they are before the begin episode'
def test_after_ep(self, execute_task):
task = execute_task('after_ep_test')
assert len(task.accepted) == 2, 'Entries should have been accepted, they are not before the begin episode'
def test_before_seq(self, execute_task):
task = execute_task('before_seq_test')
assert not task.accepted, 'No entries should have been accepted, they are before the begin episode'
def test_after_seq(self, execute_task):
task = execute_task('after_seq_test')
assert len(task.accepted) == 2, 'Entries should have been accepted, they are not before the begin episode'
def test_before_date(self, execute_task):
task = execute_task('before_date_test')
assert not task.accepted, 'No entries should have been accepted, they are before the begin episode'
def test_after_date(self, execute_task):
task = execute_task('after_date_test')
assert len(task.accepted) == 2, 'Entries should have been accepted, they are not before the begin episode'
def test_advancement(self, execute_task):
# Put S01E01 into the database as latest download
task = execute_task('test_advancement1')
assert task.accepted
# Just verify regular ep advancement would block S03E01
task = execute_task('test_advancement2')
assert not task.accepted, 'Episode advancement should have blocked'
# Make sure ep advancement doesn't block it when we've set begin to that ep
task = execute_task('test_advancement3')
assert task.accepted, 'Episode should have been accepted'
class TestSeriesPremiere(object):
config = """
templates:
global:
parsing:
series: {{parser}}
metainfo_series: yes
series_premiere: yes
tasks:
test:
mock:
- {title: 'Foobar.S01E01.PDTV-FlexGet'}
- {title: 'Foobar.S01E11.1080p-FlexGet'}
- {title: 'Foobar.S02E02.HR-FlexGet'}
"""
def testOnlyPremieres(self, execute_task):
"""Test series premiere"""
task = execute_task('test')
assert task.find_entry('accepted', title='Foobar.S01E01.PDTV-FlexGet',
series_name='Foobar', series_season=1,
series_episode=1), 'Series premiere should have been accepted'
assert len(task.accepted) == 1
# TODO: Add more tests, test interaction with series plugin and series_exists
class TestImportSeries(object):
config = """
templates:
global:
parsing:
series: {{parser}}
tasks:
timeframe_max:
configure_series:
settings:
propers: 12 hours
target: 720p
timeframe: 5 minutes
quality: "<=720p <=bluray"
from:
mock:
- title: the show
mock:
- title: the show s03e02 1080p bluray
- title: the show s03e02 hdtv
test_import_altnames:
configure_series:
from:
mock:
- {title: 'the show', configure_series_alternate_name: 'le show'}
mock:
- title: le show s03e03
"""
def test_timeframe_max(self, execute_task):
"""Tests configure_series as well as timeframe with max_quality."""
task = execute_task('timeframe_max')
        assert not task.accepted, 'Entry should not have been accepted on first run.'
age_series(minutes=6)
task = execute_task('timeframe_max')
assert task.find_entry('accepted', title='the show s03e02 hdtv'), \
'hdtv should have been accepted after timeframe.'
def test_import_altnames(self, execute_task):
"""Tests configure_series with alternate_name."""
task = execute_task('test_import_altnames')
entry = task.find_entry(title='le show s03e03')
assert entry.accepted, 'entry matching series alternate name should have been accepted.'
assert entry['series_name'] == 'the show', 'entry series should be set to the main name'
class TestIDTypes(object):
config = """
templates:
global:
parsing:
series: {{parser}}
tasks:
all_types:
series:
- episode
- seasonless episode
- date
- sequence
- stupid id:
id_regexp: (\\dcat)
mock:
- title: episode S03E04
- title: episode 3x05
- title: date 2011.4.3 other crap hdtv
- title: date 4.5.11
- title: sequence 003
- title: sequence 4
- title: stupid id 3cat
- title: seasonless episode e01
"""
def test_id_types(self, execute_task):
task = execute_task('all_types')
for entry in task.entries:
assert entry['series_name'], '%s not parsed by series plugin' % entry['title']
assert entry['series_id_type'] in entry['series_name']
class TestCaseChange(object):
config = """
templates:
global:
parsing:
series: {{parser}}
tasks:
first:
mock:
- title: theshow s02e04
series:
- TheShow
second:
mock:
- title: thEshoW s02e04 other
series:
- THESHOW
"""
def test_case_change(self, execute_task):
task = execute_task('first')
# Make sure series_name uses case from config, make sure episode is accepted
assert task.find_entry('accepted', title='theshow s02e04', series_name='TheShow')
task = execute_task('second')
# Make sure series_name uses new case from config, make sure ep is rejected because we have a copy
assert task.find_entry('rejected', title='thEshoW s02e04 other', series_name='THESHOW')
class TestInvalidSeries(object):
config = """
templates:
global:
parsing:
series: {{parser}}
tasks:
blank:
mock:
- title: whatever
series:
- '':
quality: 720p
"""
def test_blank_series(self, execute_task):
"""Make sure a blank series doesn't crash."""
task = execute_task('blank')
assert not task.aborted, 'Task should not have aborted'
class TestDoubleEps(object):
config = """
templates:
global:
parsing:
series: {{parser}}
tasks:
test_double1:
mock:
- title: double S01E02-E03
series:
- double
test_double2:
mock:
- title: double S01E03
series:
- double
"""
def test_double(self, execute_task):
# First should be accepted
task = execute_task('test_double1')
assert task.find_entry('accepted', title='double S01E02-E03')
# We already got ep 3 as part of double, should not be accepted
task = execute_task('test_double2')
assert not task.find_entry('accepted', title='double S01E03')
class TestAutoLockin(object):
config = """
templates:
global:
parsing:
series: {{parser}}
series:
- FooBar
- BarFood
tasks:
try_date_1:
mock:
- title: FooBar 2012-10-10 HDTV
lock_ep:
mock:
- title: FooBar S01E01 HDTV
- title: FooBar S01E02 HDTV
- title: FooBar S01E03 HDTV
try_date_2:
mock:
- title: FooBar 2012-10-11 HDTV
test_special_lock:
mock:
- title: BarFood christmas special HDTV
- title: BarFood easter special HDTV
- title: BarFood haloween special HDTV
- title: BarFood bad special HDTV
try_reg:
mock:
- title: BarFood S01E01 HDTV
- title: BarFood 2012-9-9 HDTV
"""
def test_ep_lockin(self, execute_task):
task = execute_task('try_date_1')
assert task.find_entry('accepted', title='FooBar 2012-10-10 HDTV'), \
'dates should be accepted before locked in on an identifier type'
task = execute_task('lock_ep')
assert len(task.accepted) == 3, 'All ep mode episodes should have been accepted'
task = execute_task('try_date_2')
assert not task.find_entry('accepted', title='FooBar 2012-10-11 HDTV'), \
'dates should not be accepted after series has locked in to ep mode'
def test_special_lock(self, execute_task):
"""Make sure series plugin does not lock in to type 'special'"""
task = execute_task('test_special_lock')
assert len(task.accepted) == 4, 'All specials should have been accepted'
task = execute_task('try_reg')
assert len(task.accepted) == 2, 'Specials should not have caused episode type lock-in'
class TestReruns(object):
config = """
templates:
global:
parsing:
series: {{parser}}
tasks:
one_accept:
mock:
- title: the show s01e01
- title: the show s01e01 different
series:
- the show
rerun: 2
mock_output: yes
"""
def test_one_accept(self, execute_task):
task = execute_task('one_accept')
assert len(task.mock_output) == 1, \
'should have accepted once!: %s' % ', '.join(e['title'] for e in task.mock_output)
class TestSpecials(object):
config = """
templates:
global:
parsing:
series: {{parser}}
tasks:
preferspecials:
mock:
- title: the show s03e04 special
series:
- the show:
prefer_specials: True
nopreferspecials:
mock:
- title: the show s03e05 special
series:
- the show:
prefer_specials: False
assumespecial:
mock:
- title: the show SOMETHING
series:
- the show:
assume_special: True
noassumespecial:
mock:
- title: the show SOMETHING
series:
- the show:
assume_special: False
special_looks_like_season_pack:
mock:
- title: Doctor.Who.S07.Special.The.Science.of.Doctor.Who.WS.XviD-Flexget
series:
- Doctor Who
"""
def test_prefer_specials(self, execute_task):
# Test that an entry matching both ep and special is flagged as a special when prefer_specials is True
task = execute_task('preferspecials')
entry = task.find_entry('accepted', title='the show s03e04 special')
        assert entry.get('series_id_type') == 'special', 'Entry which should have been flagged as a special was not.'
def test_not_prefer_specials(self, execute_task):
# Test that an entry matching both ep and special is flagged as an ep when prefer_specials is False
task = execute_task('nopreferspecials')
entry = task.find_entry('accepted', title='the show s03e05 special')
        assert entry.get('series_id_type') != 'special', 'Entry which should not have been flagged as a special was.'
def test_assume_special(self, execute_task):
# Test that an entry with no ID found gets flagged as a special and accepted if assume_special is True
task = execute_task('assumespecial')
entry = task.find_entry(title='the show SOMETHING')
assert entry.get('series_id_type') == 'special', 'Entry which should have been flagged as a special was not.'
assert entry.accepted, 'Entry which should have been accepted was not.'
def test_not_assume_special(self, execute_task):
# Test that an entry with no ID found does not get flagged as a special and accepted if assume_special is False
task = execute_task('noassumespecial')
entry = task.find_entry(title='the show SOMETHING')
assert entry.get('series_id_type') != 'special', 'Entry which should not have been flagged as a special was.'
assert not entry.accepted, 'Entry which should not have been accepted was.'
def test_special_looks_like_a_season_pack(self, execute_task):
"""Make sure special episodes are not being parsed as season packs"""
task = execute_task('special_looks_like_season_pack')
entry = task.find_entry(title='Doctor.Who.S07.Special.The.Science.of.Doctor.Who.WS.XviD-Flexget')
assert entry.get('series_id_type') == 'special', 'Entry should have been flagged as a special'
assert not entry['season_pack'], 'Entry should not have been flagged as a season pack'
        assert entry.accepted, 'Entry which should have been accepted was not.'
class TestAlternateNames(object):
config = """
templates:
global:
parsing:
series: {{parser}}
tasks:
alternate_name:
series:
- Some Show:
begin: S01E01
alternate_name: Other Show
another_alternate_name:
series:
- Some Show:
alternate_name: Good Show
set_other_alternate_name:
mock:
- title: Third.Show.S01E01
- title: Other.Show.S01E01
series:
- Some Show:
alternate_name: Third Show
rerun: 0
duplicate_names_in_different_series:
series:
- First Show:
begin: S01E01
alternate_name: Third Show
- Second Show:
begin: S01E01
alternate_name: Third Show
"""
def test_set_alternate_name(self, execute_task):
# Tests that old alternate names are not kept in the database.
task = execute_task('alternate_name')
task = execute_task('set_other_alternate_name')
assert task.find_entry('accepted', title='Third.Show.S01E01'), \
'A new alternate name should have been associated with the series.'
assert task.find_entry('undecided', title='Other.Show.S01E01'), \
'The old alternate name for the series is still present.'
def test_duplicate_alternate_names_in_different_series(self, execute_task):
with pytest.raises(TaskAbort) as ex:
execute_task('duplicate_names_in_different_series')
# only test that the reason is about alternate names, not which names.
reason = 'Error adding alternate name'
assert ex.value.reason[:27] == reason, \
'Wrong reason for task abortion. Should be about duplicate alternate names.'
# Test the DB behaves like we expect ie. alternate names cannot
def test_alternate_names_are_removed_from_db(self, execute_task):
from flexget.manager import Session
with Session() as session:
execute_task('alternate_name')
# test the current state of alternate names
assert len(session.query(db.AlternateNames).all()) == 1, 'There should be one alternate name present.'
assert session.query(db.AlternateNames).first().alt_name == 'Other Show', \
'Alternate name should have been Other Show.'
# run another task that overwrites the alternate names
execute_task('another_alternate_name')
assert len(session.query(db.AlternateNames).all()) == 1, \
'The old alternate name should have been removed from the database.'
assert session.query(db.AlternateNames).first().alt_name == 'Good Show', \
'The alternate name in the database should be the new one, Good Show.'
class TestCLI(object):
config = """
templates:
global:
parsing:
series: {{parser}}
tasks:
learn_series:
series:
- Some Show
- Other Show
mock:
- title: Some Series S01E01
- title: Other Series S01E02
"""
def test_series_list(self, manager, execute_task):
"""Very rudimentary test, mostly makes sure this doesn't crash."""
execute_task('learn_series')
options = get_parser().parse_args(['series', 'list', '--porcelain'])
buffer = StringIO()
with capture_output(buffer, loglevel='error'):
manager.handle_cli(options=options)
lines = buffer.getvalue().split('\n')
assert all(any(line.lstrip().startswith(series) for line in lines) for series in ['Some Show', 'Other Show'])
class TestSeriesRemove(object):
config = """
templates:
global:
parsing:
series: {{parser}}
tasks:
get_episode:
seen: local
series:
- My Show
mock:
- title: My Show S01E01 1080p
- title: My Show S01E01 720p
remove_episode:
seen: no
mock:
- title: My Show S01E01
series_name: My Show
series_id: S01E01
accept_all: yes
series_remove: yes
"""
def test_remove_episode(self, execute_task):
task = execute_task('get_episode')
assert len(task.accepted) == 1
first_rls = task.accepted[0]
task = execute_task('get_episode')
assert not task.accepted, 'series plugin duplicate blocking not working?'
task = execute_task('remove_episode')
task = execute_task('get_episode')
assert len(task.accepted) == 1, 'new release not accepted after forgetting ep'
assert task.accepted[0] != first_rls, 'same release accepted on second run'
class TestSeriesSeasonPack(object):
_config = """
templates:
global:
parsing:
series: internal
series:
- foo:
season_packs: yes
- bar:
season_packs: yes
tracking: backfill
- baz:
season_packs: 3
- boo:
season_packs: always
- bla:
season_packs: only
- bro:
season_packs:
threshold: 1
reject_eps: yes
tasks:
multiple_formats:
mock:
- title: foo.s01.720p-flexget
- title: foo.2xALL.720p-flexget
foo_s01:
mock:
- title: foo.s01.720p-flexget
foo_s02:
mock:
- title: foo.s02.720p-flexget
foo_s03:
mock:
- title: foo.s03.720p-flexget
foo_s01ep1:
mock:
- title: foo.s01e1.720p-flexget
foo_s02ep1:
mock:
- title: foo.s02e1.720p-flexget
season_pack_priority:
mock:
- title: foo.s01e1.720p-flexget
- title: foo.s01e2.720p-flexget
- title: foo.s01e3.720p-flexget
- title: foo.s01e4.720p-flexget
- title: foo.s01e5.720p-flexget
- title: foo.s01.720p-flexget
respect_begin:
series:
- bar:
begin: s02e01
season_packs: yes
mock:
- title: bar.s01.720p-flexget
- title: bar.s02.720p-flexget
several_seasons:
mock:
- title: foo.s03.720p-flexget
- title: foo.s07.720p-flexget
- title: foo.s03.1080p-flexget
- title: foo.s06.720p-flexget
- title: foo.s09.720p-flexget
test_backfill_1:
mock:
- title: bar.s03.720p-flexget
test_backfill_2:
mock:
- title: bar.s02.720p-flexget
test_backfill_3:
mock:
- title: bar.s03e01.720p-flexget
test_backfill_4:
mock:
- title: bar.s02e01.1080p-flexget
test_specific_season_pack_threshold_1:
mock:
- title: baz.s01e01.720p-flexget
- title: baz.s01e02.720p-flexget
- title: baz.s01e03.720p-flexget
test_specific_season_pack_threshold_2:
mock:
- title: baz.s01.720p-flexget
test_specific_season_pack_threshold_3:
mock:
- title: baz.s01e01.720p-flexget
- title: baz.s01e02.720p-flexget
- title: baz.s01e03.720p-flexget
- title: baz.s01e04.720p-flexget
test_always_get_season_pack_1:
mock:
- title: boo.s01e01.720p-flexget
- title: boo.s01e02.720p-flexget
- title: boo.s01e03.720p-flexget
- title: boo.s01e04.720p-flexget
test_always_get_season_pack_2:
mock:
- title: boo.s01.720p-flexget
test_only_get_season_packs:
mock:
- title: bla.s01.720p-flexget
- title: bla.s02e01.720p-flexget
test_proper_season_pack:
mock:
- title: foo.s01.720p-flexget
- title: foo.s01.720p.proper-flexget
test_proper_season_pack_2:
mock:
- title: foo.s01.720p-flexget
test_proper_season_pack_3:
mock:
- title: foo.s01.720p.proper-flexget
test_all_series:
mock:
- title: show.name.s01.720p.HDTV-Group
all_series:
season_packs: yes
test_with_dict_config_1:
mock:
- title: bro.s01e01.720p.HDTV-Flexget
- title: bro.s01.720p.HDTV-Flexget
test_with_dict_config_2:
mock:
- title: bro.s02.720p.HDTV-Flexget
"""
@pytest.fixture()
def config(self):
"""Overrides outer config fixture since season pack support does not work with guessit parser"""
return self._config
def test_season_pack_simple(self, execute_task):
task = execute_task('foo_s01')
assert len(task.accepted) == 1
def test_basic_tracking(self, execute_task):
task = execute_task('foo_s01')
assert len(task.accepted) == 1
task = execute_task('foo_s01ep1')
assert len(task.accepted) == 0
task = execute_task('foo_s02ep1')
assert len(task.accepted) == 1
def test_season_pack_takes_priority(self, execute_task):
task = execute_task('season_pack_priority')
assert len(task.accepted) == 1
entry = task.find_entry(title='foo.s01.720p-flexget')
assert entry.accepted
def test_respect_begin(self, execute_task):
task = execute_task('respect_begin')
assert len(task.accepted) == 1
entry = task.find_entry(title='bar.s02.720p-flexget')
assert entry.accepted
def test_tracking_rules_old_eps(self, execute_task):
task = execute_task('foo_s01')
assert len(task.accepted) == 1
task = execute_task('foo_s02')
assert len(task.accepted) == 1
task = execute_task('foo_s01ep1')
assert not task.accepted
def test_tracking_rules_old_season(self, execute_task):
task = execute_task('foo_s02')
assert len(task.accepted) == 1
task = execute_task('foo_s01')
assert not task.accepted
def test_tracking_rules_new_season(self, execute_task):
task = execute_task('foo_s01')
assert len(task.accepted) == 1
task = execute_task('foo_s03')
assert not task.accepted
def test_several_seasons(self, execute_task):
task = execute_task('several_seasons')
assert len(task.accepted) == 4
def test_multiple_formats(self, execute_task):
task = execute_task('multiple_formats')
assert len(task.accepted) == 2
def test_backfill(self, execute_task):
task = execute_task('test_backfill_1')
assert len(task.accepted) == 1
task = execute_task('test_backfill_2')
assert len(task.accepted) == 1
task = execute_task('test_backfill_3')
assert not task.accepted
task = execute_task('test_backfill_4')
assert not task.accepted
def test_default_threshold(self, execute_task):
task = execute_task('foo_s01ep1')
assert len(task.accepted) == 1
task = execute_task('foo_s01')
assert len(task.accepted) == 0
def test_specific_season_pack_threshold_positive(self, execute_task):
task = execute_task('test_specific_season_pack_threshold_1')
assert len(task.accepted) == 3
task = execute_task('test_specific_season_pack_threshold_2')
assert len(task.accepted) == 1
def test_specific_season_pack_threshold_negative(self, execute_task):
task = execute_task('test_specific_season_pack_threshold_3')
assert len(task.accepted) == 4
task = execute_task('test_specific_season_pack_threshold_2')
assert not task.accepted
def test_loose_threshold(self, execute_task):
task = execute_task('test_always_get_season_pack_1')
assert len(task.accepted) == 4
task = execute_task('test_always_get_season_pack_2')
assert len(task.accepted) == 1
def test_exclusive(self, execute_task):
task = execute_task('test_only_get_season_packs')
assert len(task.accepted) == 1
entry = task.find_entry(title='bla.s01.720p-flexget')
assert entry.accepted
def test_proper_season_pack(self, execute_task):
"""Series plugin: proper available immediately"""
task = execute_task('test_proper_season_pack')
assert task.find_entry('accepted', title='foo.s01.720p.proper-flexget')
def test_proper_season_pack_2(self, execute_task):
"""Series plugin: proper available immediately"""
task = execute_task('test_proper_season_pack_2')
assert task.find_entry('accepted', title='foo.s01.720p-flexget')
task = execute_task('test_proper_season_pack_3')
assert task.find_entry('accepted', title='foo.s01.720p.proper-flexget')
def test_all_series(self, execute_task):
task = execute_task('test_all_series')
assert task.find_entry('accepted', title='show.name.s01.720p.HDTV-Group')
def test_advanced_config(self, execute_task):
task = execute_task('test_with_dict_config_1')
assert not task.find_entry('accepted', title='bro.s01e01.720p.HDTV-Flexget')
assert task.find_entry('accepted', title='bro.s01.720p.HDTV-Flexget')
execute_task('test_with_dict_config_2',
options={'inject': [Entry(title='bro.s02e01.720p.HDTV-Flexget', url='')],
'immortal': True})
task = execute_task('test_with_dict_config_2')
assert task.find_entry('accepted', title='bro.s02.720p.HDTV-Flexget')
class TestSeriesDDAudio(object):
_config = """
templates:
global:
parsing:
series: internal
tasks:
min_quality:
mock:
- {title: 'MinQATest.S01E01.720p.XViD.DD5.1-FlexGet'}
- {title: 'MinQATest.S01E01.720p.XViD.DDP5.1-FlexGet'}
series:
- MinQATest:
quality: ">dd5.1"
max_quality:
mock:
- {title: 'MaxQATest.S01E01.720p.XViD.DD5.1-FlexGet'}
- {title: 'MaxQATest.S01E01.720p.XViD.DD+5.1-FlexGet'}
series:
- MaxQATest:
quality: "<=dd5.1"
test_channels:
mock:
- {title: 'Channels.S01E01.1080p.HDTV.DD+2.0-FlexGet'}
- {title: 'Channels.S01E01.1080p.HDTV.DD+5.1-FlexGet'}
- {title: 'Channels.S01E01.1080p.HDTV.DD+7.1-FlexGet'}
series:
- Channels:
quality: dd+5.1
"""
@pytest.fixture()
def config(self):
"""Overrides outer config fixture since DD+ and arbitrary channels support does not work with guessit parser"""
return self._config
def test_min_quality(self, execute_task):
"""Series plugin: min_quality"""
task = execute_task('min_quality')
assert task.find_entry('accepted', title='MinQATest.S01E01.720p.XViD.DDP5.1-FlexGet'), \
'MinQATest.S01E01.720p.XViD.DDP5.1-FlexGet should have been accepted'
        assert len(task.accepted) == 1, 'should have accepted only one'
def test_max_quality(self, execute_task):
"""Series plugin: max_quality"""
task = execute_task('max_quality')
assert task.find_entry('accepted', title='MaxQATest.S01E01.720p.XViD.DD5.1-FlexGet'), \
'MaxQATest.S01E01.720p.XViD.DD5.1-FlexGet should have been accepted'
assert len(task.accepted) == 1, 'should have accepted only one'
def test_channels(self, execute_task):
"""Series plugin: max_quality"""
task = execute_task('test_channels')
assert task.find_entry(title='Channels.S01E01.1080p.HDTV.DD+7.1-FlexGet'), \
'Channels.S01E01.1080p.HDTV.DD+7.1-FlexGet should have been accepted'
assert len(task.accepted) == 1, 'should have accepted only one'
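# A short sketch of how one of the test classes above could be run in
# isolation.  The file path is an assumption (these tests exercise FlexGet's
# series plugin, so flexget/tests/test_series.py is plausible but not stated
# here), and the -k expression assumes the parametrization ids contain the
# parser name:
#
#   pytest flexget/tests/test_series.py -k "TestPropers and internal"
#
# The {{parser}} placeholder in each config template indicates the tests are
# parametrized over parser implementations; TestSeriesSeasonPack and
# TestSeriesDDAudio pin `parsing: series: internal` because, per their fixture
# docstrings, the guessit parser does not support those features.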
| [
[
[
23,
39
]
],
[
[
41,
49
]
],
[
[
51,
66
]
],
[
[
88,
89
]
],
[
[
163,
171
],
[
73701,
73709
]
],
[
[
180,
186
],
[
626,
632
],
[
15179,
15185
],
[
78966,
78972
],
[
85053,
85059
],
[
71700,
71706
]
],
[
[
206,
214
],
[
848,
856
]
],
[
[
242,
247
],
[
15437,
15442
],
[
15510,
15515
],
[
83842,
83847
]
],
[
[
275,
289
],
[
73725,
73739
]
],
[
[
318,
328
],
[
73625,
73635
]
],
[
[
330,
337
],
[
476,
483
]
],
[
[
363,
372
],
[
71714,
71723
]
],
[
[
411,
413
],
[
504,
506
],
[
72414,
72416
],
[
72525,
72527
],
[
72798,
72800
],
[
72950,
72952
]
],
[
[
420,
430
],
[
26319,
26329
],
[
48235,
48245
],
[
48416,
48426
],
[
48777,
48787
],
[
49108,
49118
],
[
49686,
49696
],
[
50069,
50079
],
[
50431,
50441
],
[
50808,
50818
],
[
51570,
51580
],
[
60432,
60442
]
],
[
[
735,
741
]
],
[
[
1082,
1093
]
],
[
[
7223,
7235
]
],
[
[
8853,
8869
]
],
[
[
15806,
15828
]
],
[
[
22065,
22089
]
],
[
[
22892,
22903
]
],
[
[
30400,
30416
]
],
[
[
31880,
31894
]
],
[
[
34544,
34557
]
],
[
[
39877,
39897
]
],
[
[
40718,
40735
]
],
[
[
42102,
42120
]
],
[
[
42863,
42872
]
],
[
[
44590,
44603
]
],
[
[
50948,
50959
]
],
[
[
51704,
51718
]
],
[
[
52755,
52768
]
],
[
[
53858,
53867
]
],
[
[
58439,
58457
]
],
[
[
59354,
59370
]
],
[
[
61056,
61067
]
],
[
[
62039,
62053
]
],
[
[
62943,
62960
]
],
[
[
63451,
63464
]
],
[
[
64239,
64253
]
],
[
[
66118,
66128
]
],
[
[
66722,
66734
]
],
[
[
70125,
70143
]
],
[
[
73099,
73106
]
],
[
[
73987,
74003
]
],
[
[
75123,
75143
]
],
[
[
84087,
84104
]
]
] |
#!/usr/bin/env python
"""
Copyright 2017 ARM Limited
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
# pylint: disable=unused-argument
import mock
def create(*args, **kwargs):
"""
Create a Mock object that imitates a valid Cloud module.
:param args: Not used
:param kwargs: Not used
:return: mock.MagicMock
"""
attrs = {"client.get_suite.return_value": True, "get_campaign_id.side_effect": [True, KeyError],
"get_campaigns.return_value": True, "update_testcase.return_value": True,
"upload_results.side_effect": [True, False]}
mock_module = mock.MagicMock()
mock_module.configure_mock(**attrs)
return mock_module
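# A minimal usage sketch, not part of the original module: it assumes the mock
# returned by create() is driven the way the real Cloud module would be.  The
# variable name `cloud` is illustrative; the expected values follow directly
# from the `attrs` dict configured above.
if __name__ == "__main__":
    cloud = create()
    assert cloud.client.get_suite() is True      # fixed return value
    assert cloud.get_campaign_id() is True       # first item of the side_effect list
    try:
        cloud.get_campaign_id()                  # second item raises KeyError
    except KeyError:
        pass
    assert cloud.get_campaigns() is True
    assert cloud.update_testcase() is True
    assert cloud.upload_results() is True        # first upload "succeeds"
    assert cloud.upload_results() is False       # second upload "fails"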
| [
[
[
622,
626
],
[
1082,
1086
]
],
[
[
633,
639
]
]
] |
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import json
import logging
import uuid
from random import sample
import cassandra.concurrent
from cassandra.cluster import Cluster
from cassandra.policies import RoundRobinPolicy, TokenAwarePolicy
from solrcloudpy import SolrConnection, SearchOptions
from six.moves import input
solr_connection = None
solr_collection = None
SOLR_UNIQUE_KEY = None
cassandra_cluster = None
cassandra_session = None
cassandra_table = None
logging.basicConfig()
logging.getLogger().setLevel(logging.INFO)
logging.getLogger().handlers[0].setFormatter(
logging.Formatter(fmt="%(asctime)s %(levelname)s:%(name)s: %(message)s", datefmt="%Y-%m-%dT%H:%M:%S"))
def init(args):
global solr_connection
solr_connection = SolrConnection(args.solr)
global solr_collection
solr_collection = solr_connection[args.collection]
global SOLR_UNIQUE_KEY
SOLR_UNIQUE_KEY = args.solrIdField
dc_policy = RoundRobinPolicy()
token_policy = TokenAwarePolicy(dc_policy)
global cassandra_cluster
cassandra_cluster = Cluster(contact_points=args.cassandra, port=args.cassandraPort,
protocol_version=int(args.cassandraProtocolVersion),
load_balancing_policy=token_policy)
global cassandra_session
cassandra_session = cassandra_cluster.connect(keyspace=args.cassandraKeyspace)
global cassandra_table
cassandra_table = args.cassandraTable
def delete_by_query(args):
if args.query:
se = SearchOptions()
se.commonparams.q(args.query) \
.fl(SOLR_UNIQUE_KEY) \
.fl('id')
for fq in args.filterquery if args.filterquery is not None else []:
se.commonparams.fq(fq)
query = se
elif args.jsonparams:
se = SearchOptions(**json.loads(args.jsonparams))
se.commonparams.fl(SOLR_UNIQUE_KEY) \
.fl('id')
query = se
else:
raise RuntimeError("either query or jsonparams is required")
if check_query(query):
logging.info("Collecting tiles ....")
solr_docs = do_solr_query(query)
if confirm_delete(len(solr_docs)):
deleted_ids = do_delete(solr_docs, query)
logging.info("Deleted tile IDs %s" % json.dumps([str(doc_id) for doc_id in deleted_ids], indent=2))
else:
logging.info("Exiting")
return
else:
logging.info("Exiting")
return
def confirm_delete(num_found):
do_continue = input(
"This action will delete %s record(s) from SOLR and Cassandra. Are you sure you want to Continue? y/n: " % num_found)
while do_continue not in ['y', 'n']:
do_continue = input(
"This action will delete %s record(s) from SOLR and Cassandra. Are you sure you want to Continue? y/n: " % num_found)
return do_continue == 'y'
def check_query(query):
solr_response = solr_collection.search(query)
num_found = solr_response.result.response.numFound
if num_found == 0:
logging.info("Query returned 0 results")
return False
do_continue = input("Query found %s matching documents. Continue? [y]/n/(s)ample: " % num_found)
while do_continue not in ['y', 'n', 's', '']:
do_continue = input("Query found %s matching documents. Continue? [y]/n/(s)ample: " % num_found)
if do_continue == 'y' or do_continue == '':
return True
elif do_continue == 'n':
return False
else:
se = SearchOptions()
se.commonparams.q('%s:%s' % (SOLR_UNIQUE_KEY, sample(solr_response.result.response.docs, 1)[0][SOLR_UNIQUE_KEY]))
logging.info(json.dumps(solr_collection.search(se).result.response.docs[0], indent=2))
return check_query(query)
def do_solr_query(query):
doc_ids = []
next_cursor_mark = "*"
query.commonparams.sort('%s asc' % SOLR_UNIQUE_KEY)
while True:
query.commonparams.remove_param('cursorMark')
query.commonparams.add_params(cursorMark=next_cursor_mark)
solr_response = solr_collection.search(query)
try:
result_next_cursor_mark = solr_response.result.nextCursorMark
except AttributeError:
# No Results
return []
if result_next_cursor_mark == next_cursor_mark:
break
else:
next_cursor_mark = solr_response.result.nextCursorMark
doc_ids.extend([uuid.UUID(doc['id']) for doc in solr_response.result.response.docs])
return doc_ids
def do_delete(doc_ids, query):
logging.info("Executing Cassandra delete...")
delete_from_cassandra(doc_ids)
logging.info("Executing Solr delete...")
delete_from_solr(query)
return doc_ids
def delete_from_cassandra(doc_ids):
statement = cassandra_session.prepare("DELETE FROM %s WHERE tile_id=?" % cassandra_table)
results = cassandra.concurrent.execute_concurrent_with_args(cassandra_session, statement,
[(doc_id,) for doc_id in doc_ids])
for (success, result) in results:
if not success:
logging.warning("Could not delete tile %s" % result)
def delete_from_solr(query):
solr_collection.delete(query, commit=False)
solr_collection.commit()
def parse_args():
parser = argparse.ArgumentParser(description='Delete data from NEXUS using a Solr Query',
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('--solr',
help='The url of the SOLR server.',
required=True,
metavar='127.0.0.1:8983')
parser.add_argument('--collection',
help='The name of the SOLR collection.',
required=True,
metavar='nexustiles')
parser.add_argument('--solrIdField',
help='The name of the unique ID field for this collection.',
required=False,
default='solr_id_s',
metavar='solr_id_s')
parser.add_argument('--cassandra',
help='The hostname(s) or IP(s) of the Cassandra server(s).',
required=True,
nargs='+',
metavar=('127.0.0.100', '127.0.0.101'))
parser.add_argument('-k', '--cassandraKeyspace',
help='The Cassandra keyspace.',
required=True,
metavar='nexustiles')
group = parser.add_mutually_exclusive_group(required=True)
group.add_argument('-q', '--query',
help='The ''q'' parameter passed to SOLR Search',
metavar='*:*')
group.add_argument('--jsonparams',
help='Full query prameters formatted as JSON')
parser.add_argument('-fq', '--filterquery',
help='The ''fq'' parameter passed to SOLR Search. Only used if --jsonparams is not provided',
required=False,
nargs='+')
parser.add_argument('-t', '--cassandraTable',
help='The name of the cassandra table.',
required=False,
default='sea_surface_temp')
parser.add_argument('-p', '--cassandraPort',
help='The port used to connect to Cassandra.',
required=False,
default='9042')
parser.add_argument('-pv', '--cassandraProtocolVersion',
help='The version of the Cassandra protocol the driver should use.',
required=False,
choices=['1', '2', '3', '4', '5'],
default='3')
return parser.parse_args()
if __name__ == "__main__":
the_args = parse_args()
init(the_args)
delete_by_query(the_args)
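# Example invocation, sketched from parse_args() above.  The script filename,
# host addresses and the query value are placeholders and not taken from this
# file; the flags and defaults are the ones defined in parse_args():
#
#   python delete_by_solr_query.py \
#       --solr 127.0.0.1:8983 \
#       --collection nexustiles \
#       --cassandra 127.0.0.100 127.0.0.101 \
#       -k nexustiles \
#       -t sea_surface_temp \
#       -q 'dataset_s:EXAMPLE_DATASET'
#
# The run first reports how many documents match, asks for confirmation (with
# an option to print a sample document), and only then deletes the matching
# ids from Cassandra and the corresponding documents from Solr.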
| [
[
[
788,
796
],
[
6096,
6104
],
[
6230,
6238
]
],
[
[
804,
808
],
[
2580,
2584
],
[
3037,
3041
],
[
4427,
4431
]
],
[
[
816,
823
],
[
1223,
1230
],
[
1245,
1252
],
[
1274,
1281
],
[
1288,
1295
],
[
1338,
1345
],
[
2811,
2818
],
[
3000,
3007
],
[
3126,
3133
],
[
3187,
3194
],
[
3806,
3813
],
[
4414,
4421
],
[
5328,
5335
],
[
5413,
5420
],
[
5902,
5909
]
],
[
[
831,
835
],
[
5202,
5206
]
],
[
[
855,
861
],
[
4338,
4344
]
],
[
[
870,
890
],
[
5648,
5657
]
],
[
[
921,
928
],
[
1820,
1827
]
],
[
[
960,
976
],
[
1700,
1716
]
],
[
[
978,
994
],
[
1738,
1754
]
],
[
[
1019,
1033
],
[
1509,
1523
]
],
[
[
1035,
1048
],
[
2280,
2293
],
[
2564,
2577
],
[
4268,
4281
]
],
[
[
1072,
1077
],
[
3277,
3282
],
[
3474,
3479
],
[
3887,
3892
],
[
4043,
4048
]
],
[
[
1079,
1094
]
],
[
[
1102,
1117
],
[
3688,
3703
],
[
4438,
4453
],
[
4825,
4840
],
[
5990,
6005
],
[
6038,
6053
]
],
[
[
1125,
1140
],
[
2352,
2367
],
[
2636,
2651
],
[
4321,
4336
],
[
4387,
4402
],
[
4647,
4662
]
],
[
[
1149,
1166
]
],
[
[
1174,
1191
],
[
5555,
5572
],
[
5698,
5715
]
],
[
[
1199,
1214
],
[
5616,
5631
]
],
[
[
1448,
1452
],
[
8720,
8724
]
],
[
[
2225,
2240
],
[
8739,
8754
]
],
[
[
3232,
3246
],
[
2902,
2916
]
],
[
[
3648,
3659
],
[
2783,
2794
],
[
4516,
4527
]
],
[
[
4541,
4554
],
[
2869,
2882
]
],
[
[
5297,
5306
],
[
2960,
2969
]
],
[
[
5507,
5528
],
[
5378,
5399
]
],
[
[
5961,
5977
],
[
5458,
5474
]
],
[
[
6069,
6079
],
[
8703,
8713
]
],
[
[
8692,
8700
],
[
8725,
8733
],
[
8755,
8763
]
],
[
[
1491,
1506
],
[
1584,
1599
]
],
[
[
1566,
1581
]
],
[
[
1648,
1663
]
],
[
[
1800,
1817
],
[
2090,
2107
]
],
[
[
2070,
2087
]
],
[
[
2181,
2196
]
]
] |
from decimal import *
# Purpose: read in SSDEEP output and print findings.
# Author: Tanner G.
def main():
file = open("ssdeep_comparison", "r")
# read past first line of output
file.readline()
filea_data = file.readline()
fileb_data = file.readline()
file.close()
totalCount = 0
similarities = 0
index = 0
max_len = len(filea_data)
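# Walk both hash strings in lockstep; a comma marks the end of the hash field in the ssdeep output line.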
while index < max_len:
totalCount +=1
if filea_data[index] == "," or fileb_data[index] == ",":
index = max_len
totalCount -=1
break
elif filea_data[index] == fileb_data[index]:
similarities +=1
index +=1
else:
index+=1
continue
print("------------------")
print("Stats from ssdeep:")
print("------------------")
print("Total Count: " + str(totalCount))
print("Similarities: " + str(similarities))
ratio = (Decimal(similarities)/Decimal(totalCount) * 100)
print ("Hash similarity detected: " + str(ratio)[:5] + "%")
outputFile = open("ssdeep_stats", "w")
outputFile.write("count:"+str(totalCount)+",ratio:"+str(ratio)[:5]+"\n")
outputFile.close()
if __name__ == "__main__":
main()
| [
[
[
20,
21
],
[
799,
806
],
[
821,
828
]
],
[
[
101,
105
],
[
1072,
1076
]
]
] |
#!/usr/bin/env python
import cPickle
from functools import wraps
def redis_lru(capacity=5000, slice=slice(None)):
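# Redis-backed LRU cache. Key layout (inferred from the calls below):
# lru:keys:<fn> - sorted set ranking cache keys by use count
# lru:vals:<fn> - hash mapping cache key -> pickled return value
# lru:hits:<fn> / lru:miss:<fn> - hit and miss counters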
def decorator(func):
cache_keys = "lru:keys:%s" % (func.__name__,)
cache_vals = "lru:vals:%s" % (func.__name__,)
cache_hits = "lru:hits:%s" % (func.__name__,)
cache_miss = "lru:miss:%s" % (func.__name__,)
lvars = [None] # closure mutable
def add(key, value):
eject()
conn = lvars[0]
conn.incr(cache_miss)
conn.hset(cache_vals, key, cPickle.dumps(value))
conn.zadd(cache_keys, 0, key)
return value
def get(key):
conn = lvars[0]
value = conn.hget(cache_vals, key)
if value:
conn.incr(cache_hits)
conn.zincrby(cache_keys, key, 1.0)
value = cPickle.loads(value)
return value
def eject():
conn = lvars[0]
count = min((capacity / 10) or 1, 1000)
if conn.zcard(cache_keys) >= capacity:
eject = conn.zrange(cache_keys, 0, count)
conn.zremrangebyrank(cache_keys, 0, count)
conn.hdel(cache_vals, *eject)
@wraps(func)
def wrapper(*args, **kwargs):
conn = lvars[0]
if conn:
items = args + tuple(sorted(kwargs.items()))
key = cPickle.dumps(items[slice])
return get(key) or add(key, func(*args, **kwargs))
else:
return func(*args, **kwargs)
def info():
conn = lvars[0]
size = int(conn.zcard(cache_keys) or 0)
hits, misses = int(conn.get(cache_hits) or 0), int(conn.get(cache_miss) or 0)
return hits, misses, capacity, size
def clear():
conn = lvars[0]
conn.delete(cache_keys, cache_vals)
conn.delete(cache_hits, cache_miss)
def init(conn):
lvars[0] = conn
wrapper.init = init
wrapper.info = info
wrapper.clear = clear
return wrapper
return decorator
| [
[
[
29,
36
],
[
480,
487
],
[
718,
725
],
[
1153,
1160
]
],
[
[
59,
64
],
[
1019,
1024
]
],
[
[
70,
79
]
]
] |
swizzle_table = [
[
b"\x00",
b"\x01",
b"\x40",
b"\x03",
b"\x10",
b"\x21",
b"\x50",
b"\x23",
b"\x04",
b"\x09",
b"\x44",
b"\x0b",
b"\x14",
b"\x29",
b"\x54",
b"\x2b",
],
[
b"\x08",
b"\x11",
b"\x48",
b"\x13",
b"\x18",
b"\x31",
b"\x58",
b"\x33",
b"\x0c",
b"\x19",
b"\x4c",
b"\x1b",
b"\x1c",
b"\x39",
b"\x5c",
b"\x3b",
],
[
b"\x80",
b"\x05",
b"\xc0",
b"\x07",
b"\x90",
b"\x25",
b"\xd0",
b"\x27",
b"\x84",
b"\x0d",
b"\xc4",
b"\x0f",
b"\x94",
b"\x2d",
b"\xd4",
b"\x2f",
],
[
b"\x88",
b"\x15",
b"\xc8",
b"\x17",
b"\x98",
b"\x35",
b"\xd8",
b"\x37",
b"\x8c",
b"\x1d",
b"\xcc",
b"\x1f",
b"\x9c",
b"\x3d",
b"\xdc",
b"\x3f",
],
[
b"\x02",
b"\x41",
b"\x42",
b"\x43",
b"\x12",
b"\x61",
b"\x52",
b"\x63",
b"\x06",
b"\x49",
b"\x46",
b"\x4b",
b"\x16",
b"\x69",
b"\x56",
b"\x6b",
],
[
b"\x0a",
b"\x51",
b"\x4a",
b"\x53",
b"\x1a",
b"\x71",
b"\x5a",
b"\x73",
b"\x0e",
b"\x59",
b"\x4e",
b"\x5b",
b"\x1e",
b"\x79",
b"\x5e",
b"\x7b",
],
[
b"\x82",
b"\x45",
b"\xc2",
b"\x47",
b"\x92",
b"\x65",
b"\xd2",
b"\x67",
b"\x86",
b"\x4d",
b"\xc6",
b"\x4f",
b"\x96",
b"\x6d",
b"\xd6",
b"\x6f",
],
[
b"\x8a",
b"\x55",
b"\xca",
b"\x57",
b"\x9a",
b"\x75",
b"\xda",
b"\x77",
b"\x8e",
b"\x5d",
b"\xce",
b"\x5f",
b"\x9e",
b"\x7d",
b"\xde",
b"\x7f",
],
[
b"\x20",
b"\x81",
b"\x60",
b"\x83",
b"\x30",
b"\xa1",
b"\x70",
b"\xa3",
b"\x24",
b"\x89",
b"\x64",
b"\x8b",
b"\x34",
b"\xa9",
b"\x74",
b"\xab",
],
[
b"\x28",
b"\x91",
b"\x68",
b"\x93",
b"\x38",
b"\xb1",
b"\x78",
b"\xb3",
b"\x2c",
b"\x99",
b"\x6c",
b"\x9b",
b"\x3c",
b"\xb9",
b"\x7c",
b"\xbb",
],
[
b"\xa0",
b"\x85",
b"\xe0",
b"\x87",
b"\xb0",
b"\xa5",
b"\xf0",
b"\xa7",
b"\xa4",
b"\x8d",
b"\xe4",
b"\x8f",
b"\xb4",
b"\xad",
b"\xf4",
b"\xaf",
],
[
b"\xa8",
b"\x95",
b"\xe8",
b"\x97",
b"\xb8",
b"\xb5",
b"\xf8",
b"\xb7",
b"\xac",
b"\x9d",
b"\xec",
b"\x9f",
b"\xbc",
b"\xbd",
b"\xfc",
b"\xbf",
],
[
b"\x22",
b"\xc1",
b"\x62",
b"\xc3",
b"\x32",
b"\xe1",
b"\x72",
b"\xe3",
b"\x26",
b"\xc9",
b"\x66",
b"\xcb",
b"\x36",
b"\xe9",
b"\x76",
b"\xeb",
],
[
b"\x2a",
b"\xd1",
b"\x6a",
b"\xd3",
b"\x3a",
b"\xf1",
b"\x7a",
b"\xf3",
b"\x2e",
b"\xd9",
b"\x6e",
b"\xdb",
b"\x3e",
b"\xf9",
b"\x7e",
b"\xfb",
],
[
b"\xa2",
b"\xc5",
b"\xe2",
b"\xc7",
b"\xb2",
b"\xe5",
b"\xf2",
b"\xe7",
b"\xa6",
b"\xcd",
b"\xe6",
b"\xcf",
b"\xb6",
b"\xed",
b"\xf6",
b"\xef",
],
[
b"\xaa",
b"\xd5",
b"\xea",
b"\xd7",
b"\xba",
b"\xf5",
b"\xfa",
b"\xf7",
b"\xae",
b"\xdd",
b"\xee",
b"\xdf",
b"\xbe",
b"\xfd",
b"\xfe",
b"\xff",
],
]
MOSHI_SET_OFFSET = 0
MOSHI_TERMINATION = 2
MOSHI_VECTOR_SPEED = 5
MOSHI_RASTER_SPEED = 4
MOSHI_CUT_ABS = 15
MOSHI_CUT_HORIZ = 14
MOSHI_CUT_VERT = 11
MOSHI_MOVE_ABS = 7
MOSHI_MOVE_HORIZ = 6
MOSHI_MOVE_VERT = 3
MOSHI_FREEMOTOR = 1
MOSHI_ESTOP = 1
MOSHI_EPILOGUE = 2
MOSHI_PROLOGUE = 6
# 6 also seen at laser startup.
MOSHI_LASER = 7
MOSHI_READ = 14
# 14 is also sometimes sent as a keepalive every 3.4 seconds.
class MoshiBlob:
"""
MoshiBlobs are datablobs of Moshi types. These are series of commands which should be executed as a program within
the Moshicontroller.
"""
def __init__(self, channel=None):
self.data = bytearray() # Queued additional commands programs.
self.channel = channel
self.last_x = 0
self.last_y = 0
self.offset_x = 0
self.offset_y = 0
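# _stage enforces command ordering: 0 = empty, 1 = speed header written,
# 2 = offset set, 3 = cut/move data, 4 = terminated.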
self._stage = 0
def __len__(self):
return len(self.data)
def pipe_int8(self, value):
"""
Write an 8 bit int to the current program.
"""
v = bytes(
bytearray(
[
value & 0xFF,
]
)
)
self.write(v)
def pipe_int16le(self, value):
"""
Write a 16 bit little-endian value to the current program.
"""
v = bytes(
bytearray(
[
(value >> 0) & 0xFF,
(value >> 8) & 0xFF,
]
)
)
self.write(v)
def write(self, bytes_to_write):
"""
Writes data to the queue; it will be moved into the buffer by the thread in a threadsafe manner.
:param bytes_to_write: data to write to the queue.
:return:
"""
self.data += bytes_to_write
return self
def vector_speed(self, speed_mms, normal_speed_mms):
"""
Vector Speed Byte. (0x00 position), followed by 2 int8 values.
Jog and Normal Speed. These values are limited to integer values which
are 1 to 256.
:return:
"""
assert self._stage == 0
self._stage = 1
if self.channel:
self.channel(
"Vector Cut Speed: %d mm/s Normal Speed: %d mm/s"
% (int(speed_mms), int(normal_speed_mms))
)
self.write(swizzle_table[MOSHI_VECTOR_SPEED][0])
if speed_mms > 256:
speed_mms = 256
if speed_mms < 1:
speed_mms = 1
self.pipe_int8(speed_mms - 1)
self.pipe_int8(normal_speed_mms - 1)
def raster_speed(self, speed_mms):
"""
Write speed for raster programs.
"""
assert self._stage == 0
self._stage = 1
if self.channel:
self.channel("Raster Header Speed: %d cm/s" % int(speed_mms))
self.write(swizzle_table[MOSHI_RASTER_SPEED][0])
speed_cms = int(round(speed_mms / 10))
if speed_cms == 0:
speed_cms = 1
self.pipe_int8(speed_cms - 1)
def set_offset(self, z, x, y):
"""
2nd Command For Jump. (0x03 position), followed by 3 int16le (2)
:return:
"""
assert self._stage == 1
self._stage = 2
self.offset_x = x
self.offset_y = y
if self.channel:
self.channel("Set Location z: %d, x: %d, y: %d" % (int(z), int(x), int(y)))
self.write(swizzle_table[MOSHI_SET_OFFSET][0])
self.pipe_int16le(z) # Unknown, always zero.
self.pipe_int16le(x) # x
self.pipe_int16le(y) # y
def termination(self):
"""
Terminal Commands for Jump/Program. (last 7 bytes). (4)
:return:
"""
# assert self._stage == 3
self._stage = 4
if self.channel:
self.channel("Termination.")
for i in range(7):
self.write(swizzle_table[MOSHI_TERMINATION][0])
def cut_abs(self, x, y):
"""
Write an absolute position cut value.
Laser will cut to this position from the current stored head position.
Head position is stored on the Moshiboard
"""
assert 2 <= self._stage <= 3
self._stage = 3
if x < 0:
x = 0
if y < 0:
y = 0
self.last_x = x
self.last_y = y
x -= self.offset_x
y -= self.offset_y
if self.channel:
self.channel("Cut x: %d y: %d" % (int(x), int(y)))
self.write(swizzle_table[MOSHI_CUT_ABS][1])
self.pipe_int16le(int(x))
self.pipe_int16le(int(y))
def move_abs(self, x, y):
"""
Write an absolute position move value.
Laser will move without cutting to this position from the current stored head position.
Head position is stored on the Moshiboard
"""
assert 2 <= self._stage <= 3
self._stage = 3
if x < 0:
x = 0
if y < 0:
y = 0
self.last_x = x
self.last_y = y
x -= self.offset_x
y -= self.offset_y
if self.channel:
self.channel("Move x: %d y: %d" % (int(x), int(y)))
self.write(swizzle_table[MOSHI_MOVE_ABS][0])
self.pipe_int16le(int(x))
self.pipe_int16le(int(y))
def move_vertical_abs(self, y):
"""
Write an absolute position vertical move.
Laser will move the y position without cutting to the new position from the head position
stored in the Moshiboard.
"""
assert 2 <= self._stage <= 3
self._stage = 3
self.last_y = y
y -= self.offset_y
if self.channel:
self.channel("Move Vertical y: %d" % int(y))
self.write(swizzle_table[MOSHI_MOVE_VERT][0])
self.pipe_int16le(int(y))
def move_horizontal_abs(self, x):
"""
Write an absolute position horizontal move.
Laser will move the x position without cutting to the new position from the head position
stored in the Moshiboard.
"""
assert 2 <= self._stage <= 3
self._stage = 3
self.last_x = x
x -= self.offset_x
if self.channel:
self.channel("Move Horizontal x: %d" % int(x))
self.write(swizzle_table[MOSHI_MOVE_HORIZ][0])
self.pipe_int16le(int(x))
def cut_horizontal_abs(self, x):
"""
Write an absolute position horizontal cut.
Laser will cut to the x position with laser firing to the new position from the head position
stored in the Moshiboard.
"""
assert 2 <= self._stage <= 3
self._stage = 3
self.last_x = x
x -= self.offset_x
if self.channel:
self.channel("Cut Horizontal x: %d" % int(x))
self.write(swizzle_table[MOSHI_CUT_HORIZ][0])
self.pipe_int16le(int(x))
def cut_vertical_abs(self, y):
"""
Write an absolute position vertical cut.
Laser will cut to the y position with laser firing to the new position from the head position
stored in the Moshiboard
"""
assert 2 <= self._stage <= 3
self._stage = 3
self.last_y = y
y -= self.offset_y
if self.channel:
self.channel("Cut Vertical y: %d" % int(y))
self.write(swizzle_table[MOSHI_CUT_VERT][0])
self.pipe_int16le(int(y))
@staticmethod
def _swizzle(b, p7, p6, p5, p4, p3, p2, p1, p0):
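# Bit permutation: bit i of b is emitted at position p_i of the result.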
return (
((b >> 0) & 1) << p0
| ((b >> 1) & 1) << p1
| ((b >> 2) & 1) << p2
| ((b >> 3) & 1) << p3
| ((b >> 4) & 1) << p4
| ((b >> 5) & 1) << p5
| ((b >> 6) & 1) << p6
| ((b >> 7) & 1) << p7
)
@staticmethod
def convert(q):
"""
Translates a Moshiboard swizzle into the correct Moshi command code.
Moshiboard command codes have 16 values with 16 different swizzled values. There are
two different swizzles depending on the parity of the particular code. These codes are used
randomly by Moshi's native software. The board itself reads these all the same.
"""
if q & 1:
return MoshiBlob._swizzle(q, 7, 6, 2, 4, 3, 5, 1, 0)
else:
return MoshiBlob._swizzle(q, 5, 1, 7, 2, 4, 3, 6, 0)
@staticmethod
def reconvert(q):
"""
Counter-translate a particular command code back into correct values.
"""
for m in range(5):
q = MoshiBlob.convert(q)
return q
| [
[
[
0,
13
],
[
6930,
6943
],
[
7438,
7451
],
[
8005,
8018
],
[
8471,
8484
],
[
9080,
9093
],
[
9773,
9786
],
[
10333,
10346
],
[
10865,
10878
],
[
11399,
11412
],
[
11925,
11938
]
],
[
[
4581,
4597
],
[
8019,
8035
]
],
[
[
4602,
4619
],
[
8485,
8502
]
],
[
[
4624,
4642
],
[
6944,
6962
]
],
[
[
4647,
4665
],
[
7452,
7470
]
],
[
[
4670,
4683
],
[
9094,
9107
]
],
[
[
4689,
4704
],
[
11413,
11428
]
],
[
[
4710,
4724
],
[
11939,
11953
]
],
[
[
4730,
4744
],
[
9787,
9801
]
],
[
[
4749,
4765
],
[
10879,
10895
]
],
[
[
4770,
4785
],
[
10347,
10362
]
],
[
[
4791,
4806
]
],
[
[
4811,
4822
]
],
[
[
4827,
4841
]
],
[
[
4846,
4860
]
],
[
[
4897,
4908
]
],
[
[
4913,
4923
]
],
[
[
4998,
5007
],
[
12824,
12833
],
[
12903,
12912
],
[
13135,
13144
]
]
] |
from core.models import User
from django.http import HttpResponse, JsonResponse
from django.shortcuts import get_object_or_404
from drf_yasg import openapi
from drf_yasg.utils import swagger_auto_schema
from rest_framework import permissions, status
from rest_framework.decorators import api_view, permission_classes
from rest_framework.parsers import JSONParser
from requests.exceptions import ConnectionError
from projects.models import (
Page,
Project,
ProjectMemberRole,
ProjectAuditParameters,
AvailableAuditParameters,
Script,
)
from projects.serializers import (
PageSerializer,
ProjectSerializer,
ProjectMemberRoleSerializer,
ProjectAuditParametersSerializer,
AvailableAuditParameterSerializer,
ScriptSerializer,
)
from projects.permissions import (
check_if_member_of_project,
check_if_admin_of_project,
is_admin_of_project,
)
from audits.tasks import get_wpt_audit_configurations
def get_user_projects(user_id):
return Project.objects.filter(members__id=user_id, is_active=True)
@swagger_auto_schema(
methods=["get"],
responses={
200: openapi.Response(
"Returns a list of all the user’s projects", ProjectSerializer(many=True)
)
},
tags=["Projects"],
)
@swagger_auto_schema(
methods=["post"],
request_body=ProjectSerializer,
responses={201: openapi.Response("Returns the created project", ProjectSerializer)},
tags=["Projects"],
)
@api_view(["GET", "POST"])
@permission_classes([permissions.IsAuthenticated])
def project_list(request):
if request.method == "GET":
projects = get_user_projects(request.user.id)
serializer = ProjectSerializer(
projects, many=True, context={"user_id": request.user.id}
)
return JsonResponse(serializer.data, safe=False)
elif request.method == "POST":
data = JSONParser().parse(request)
serializer = ProjectSerializer(data=data, context={"user_id": request.user.id})
if serializer.is_valid():
project = Project.objects.create(**serializer.validated_data)
project.save()
return JsonResponse(
{"uuid": project.uuid, **serializer.data},
status=status.HTTP_201_CREATED,
)
return JsonResponse(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
@swagger_auto_schema(
methods=["get"],
responses={200: openapi.Response("", ProjectSerializer)},
tags=["Projects"],
)
@api_view(["GET"])
@permission_classes([permissions.IsAuthenticated])
def first_project(request):
"""Returns the first project of the user.
This is used to speed up the loading of the first project page"""
projects = get_user_projects(request.user.id)
serializer = ProjectSerializer(
projects.first(), context={"user_id": request.user.id}
)
return JsonResponse(serializer.data)
@swagger_auto_schema(
methods=["get"],
responses={
200: openapi.Response("Returns details of a project.", ProjectSerializer)
},
tags=["Projects"],
)
@swagger_auto_schema(
methods=["put"],
request_body=ProjectSerializer,
responses={
200: openapi.Response(
"Updates a project. Allows for partial updates.", ProjectSerializer
)
},
tags=["Projects"],
)
@swagger_auto_schema(
methods=["delete"], responses={204: "No content"}, tags=["Projects"]
)
@api_view(["GET", "PUT", "DELETE"])
@permission_classes([permissions.IsAuthenticated])
def project_detail(request, project_uuid):
project = get_object_or_404(Project, pk=project_uuid)
check_if_member_of_project(request.user.id, project.uuid)
if request.method == "GET":
if is_admin_of_project(request.user.id, project.uuid):
serializer = ProjectSerializer(
project, context={"user_id": request.user.id}
)
return JsonResponse(serializer.data)
serializer = ProjectSerializer(
project,
fields=(
"uuid",
"name",
"project_members",
"pages",
"scripts",
"audit_parameters_list",
"screenshot_url",
"latest_audit_at",
"has_siblings",
),
context={"user_id": request.user.id},
)
return JsonResponse(serializer.data)
elif request.method == "PUT":
check_if_admin_of_project(request.user.id, project.uuid)
data = JSONParser().parse(request)
serializer = ProjectSerializer(
project, data=data, partial=True, context={"user_id": request.user.id}
)
if serializer.is_valid():
serializer.save()
return JsonResponse(serializer.data)
return JsonResponse(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
elif request.method == "DELETE":
check_if_admin_of_project(request.user.id, project.uuid)
project.delete()
return JsonResponse({}, status=status.HTTP_204_NO_CONTENT)
@swagger_auto_schema(
methods=["get"],
responses={
200: openapi.Response(
"Returns a list of all pages in the project", PageSerializer(many=True)
)
},
tags=["Pages"],
)
@swagger_auto_schema(
methods=["post"],
request_body=PageSerializer,
responses={201: openapi.Response("Returns the created page", PageSerializer)},
tags=["Pages"],
)
@api_view(["GET", "POST"])
@permission_classes([permissions.IsAuthenticated])
def project_page_list(request, project_uuid):
project = Project.objects.get(uuid=project_uuid)
check_if_member_of_project(request.user.id, project.uuid)
if request.method == "GET":
pages = project.pages.all()
serializer = PageSerializer(pages, many=True)
return JsonResponse(serializer.data, safe=False)
elif request.method == "POST":
check_if_admin_of_project(request.user.id, project.uuid)
data = JSONParser().parse(request)
serializer = PageSerializer(data=data)
if serializer.is_valid():
page = Page.objects.create(project=project, **serializer.validated_data)
page.save()
return JsonResponse(
{"uuid": page.uuid, **serializer.data}, status=status.HTTP_201_CREATED
)
return JsonResponse(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
@swagger_auto_schema(
methods=["get"],
responses={200: openapi.Response("Returns details of a page.", PageSerializer)},
tags=["Pages"],
)
@swagger_auto_schema(
methods=["put"],
request_body=PageSerializer,
responses={
200: openapi.Response(
"Updates a page. Allows for partial updates.", PageSerializer
)
},
tags=["Pages"],
)
@swagger_auto_schema(methods=["delete"], responses={204: "No content"}, tags=["Pages"])
@api_view(["GET", "PUT", "DELETE"])
@permission_classes([permissions.IsAuthenticated])
def project_page_detail(request, project_uuid, page_uuid):
project = get_object_or_404(Project, pk=project_uuid)
page = get_object_or_404(Page, pk=page_uuid)
check_if_member_of_project(request.user.id, project.uuid)
if page.project != project:
return JsonResponse({}, status=status.HTTP_400_BAD_REQUEST)
if request.method == "GET":
serializer = PageSerializer(page)
return JsonResponse(serializer.data)
elif request.method == "PUT":
check_if_admin_of_project(request.user.id, project.uuid)
data = JSONParser().parse(request)
serializer = PageSerializer(page, data=data)
if serializer.is_valid():
serializer.save()
return JsonResponse(serializer.data)
return JsonResponse(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
elif request.method == "DELETE":
check_if_admin_of_project(request.user.id, project.uuid)
page.delete()
return JsonResponse({}, status=status.HTTP_204_NO_CONTENT)
@swagger_auto_schema(
methods=["post"],
request_body=ProjectAuditParametersSerializer,
responses={
201: openapi.Response(
"Returns the created project audit parameter",
ProjectAuditParametersSerializer,
)
},
tags=["Project Audit Parameters"],
)
@api_view(["POST"])
@permission_classes([permissions.IsAuthenticated])
def project_audit_parameter_list(request, project_uuid):
project = Project.objects.get(uuid=project_uuid)
check_if_admin_of_project(request.user.id, project.uuid)
data = JSONParser().parse(request)
serializer = ProjectAuditParametersSerializer(data=data)
if serializer.is_valid():
audit_parameter = ProjectAuditParameters.objects.create(
project=project, **serializer.validated_data
)
audit_parameter.save()
serializer = ProjectAuditParametersSerializer(audit_parameter)
return JsonResponse(serializer.data, status=status.HTTP_201_CREATED)
return JsonResponse(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
@swagger_auto_schema(
methods=["get"],
responses={
200: openapi.Response(
"Returns the details of a project audit parameter.",
ProjectAuditParametersSerializer,
)
},
tags=["Project Audit Parameters"],
)
@swagger_auto_schema(
methods=["put"],
request_body=ProjectAuditParametersSerializer,
responses={
200: openapi.Response(
"Updates a project audit parameter. Allows for partial updates.",
ProjectAuditParametersSerializer,
)
},
tags=["Project Audit Parameters"],
)
@swagger_auto_schema(
methods=["delete"], responses={204: "No content"}, tags=["Project Audit Parameters"]
)
@api_view(["GET", "PUT", "DELETE"])
@permission_classes([permissions.IsAuthenticated])
def project_audit_parameters_detail(request, project_uuid, audit_parameters_uuid):
project = get_object_or_404(Project, pk=project_uuid)
audit_parameters = get_object_or_404(
ProjectAuditParameters, pk=audit_parameters_uuid
)
check_if_member_of_project(request.user.id, project.uuid)
if audit_parameters.project != project:
return JsonResponse({}, status=status.HTTP_400_BAD_REQUEST)
if request.method == "GET":
serializer = ProjectAuditParametersSerializer(audit_parameters)
return JsonResponse(serializer.data)
elif request.method == "PUT":
check_if_admin_of_project(request.user.id, project.uuid)
data = JSONParser().parse(request)
serializer = ProjectAuditParametersSerializer(audit_parameters, data=data)
if serializer.is_valid():
serializer.save()
return JsonResponse(serializer.data)
return JsonResponse(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
elif request.method == "DELETE":
check_if_admin_of_project(request.user.id, project.uuid)
audit_parameters.delete()
return JsonResponse({}, status=status.HTTP_204_NO_CONTENT)
@swagger_auto_schema(
methods=["put"],
request_body=ProjectMemberRoleSerializer,
responses={
200: openapi.Response(
"Updates a project member. Allows for partial updates.",
ProjectMemberRoleSerializer,
)
},
tags=["Project Members"],
)
@swagger_auto_schema(
methods=["delete"], responses={204: "No content"}, tags=["Project Members"]
)
@api_view(["PUT", "DELETE"])
@permission_classes([permissions.IsAuthenticated])
def project_member_detail(request, project_uuid, user_id):
project = get_object_or_404(Project, pk=project_uuid)
check_if_admin_of_project(request.user.id, project.uuid)
project_member = ProjectMemberRole.objects.filter(
project_id=project_uuid, user_id=user_id
)
if not project_member:
return HttpResponse(
"No project member was found", status=status.HTTP_404_NOT_FOUND
)
if request.method == "PUT":
data = JSONParser().parse(request)
if "is_admin" in data and type(data["is_admin"]) is bool:
project_member.update(is_admin=data["is_admin"])
serializer = ProjectMemberRoleSerializer(project_member.first())
return JsonResponse(serializer.data)
return HttpResponse(
"Please provide a valid 'is_admin' value.",
status=status.HTTP_400_BAD_REQUEST,
)
elif request.method == "DELETE":
project_member.delete()
return JsonResponse({}, status=status.HTTP_204_NO_CONTENT)
@swagger_auto_schema(
methods=["post"],
request_body=openapi.Schema(
type="object", properties={"user_id": openapi.Schema(type="string")}
),
responses={
201: openapi.Response(
"Returns the updated project with the new member.", ProjectSerializer
)
},
tags=["Project Members"],
)
@api_view(["POST"])
@permission_classes([permissions.IsAuthenticated])
def project_members(request, project_uuid):
project = get_object_or_404(Project, pk=project_uuid)
check_if_admin_of_project(request.user.id, project.uuid)
data = JSONParser().parse(request)
if "user_id" in data:
if not ProjectMemberRole.objects.filter(
project_id=project_uuid, user_id=data["user_id"]
):
user = User.objects.filter(id=data["user_id"])
if not user:
return HttpResponse(
"No user found with this id", status=status.HTTP_404_NOT_FOUND
)
project = Project.objects.filter(uuid=project_uuid).first()
project.members.add(user.first(), through_defaults={"is_admin": False})
serializer = ProjectSerializer(project)
return JsonResponse(serializer.data)
return HttpResponse(
"The user is already a member of the project",
status=status.HTTP_400_BAD_REQUEST,
)
return HttpResponse(
"You must provide a user_id", status=status.HTTP_400_BAD_REQUEST
)
@swagger_auto_schema(
methods=["post"],
request_body=openapi.Schema(
type="object", properties={"wpt_instance_url": openapi.Schema(type="string")}
),
responses={
201: openapi.Response(
"Returns discovered available audit parameters for the WPT instance URL passed in parameter",
AvailableAuditParameterSerializer,
)
},
tags=["Project Audit Parameters"],
)
@api_view(["POST"])
def discover_available_audit_parameters(request):
data = JSONParser().parse(request)
if "wpt_instance_url" in data:
try:
get_wpt_audit_configurations(data["wpt_instance_url"])
except ConnectionError:
return JsonResponse(
{
"error": "UNREACHABLE",
"details": "The WPT instance is not reachable, please check the URL",
},
status=status.HTTP_400_BAD_REQUEST,
)
available_audit_parameters = AvailableAuditParameters.objects.filter(
is_active=True
)
serializer = AvailableAuditParameterSerializer(
available_audit_parameters, many=True
)
return JsonResponse(serializer.data, safe=False)
return JsonResponse(
{
"error": "MISSING_PARAMETER",
"details": "You must provide a wpt_instance_url in the request body",
},
status=status.HTTP_400_BAD_REQUEST,
)
@swagger_auto_schema(
methods=["get"],
responses={
200: openapi.Response(
"Returns all WebPageTest available audit parameters",
AvailableAuditParameterSerializer,
)
},
tags=["Project Audit Parameters"],
)
@api_view(["GET"])
@permission_classes([permissions.IsAuthenticated])
def available_audit_parameters(request):
available_audit_parameters = AvailableAuditParameters.objects.filter(is_active=True)
serializer = AvailableAuditParameterSerializer(
available_audit_parameters, many=True
)
return JsonResponse(serializer.data, safe=False)
@swagger_auto_schema(
methods=["post"],
request_body=ScriptSerializer,
responses={201: openapi.Response("Returns the created script", ScriptSerializer)},
tags=["Scripts"],
)
@api_view(["POST"])
@permission_classes([permissions.IsAuthenticated])
def project_scripts(request, project_uuid):
project = Project.objects.get(uuid=project_uuid)
check_if_admin_of_project(request.user.id, project.uuid)
data = JSONParser().parse(request)
serializer = ScriptSerializer(data=data)
if serializer.is_valid():
script = Script.objects.create(project=project, **serializer.validated_data)
script.save()
return JsonResponse(
{"uuid": script.uuid, **serializer.data}, status=status.HTTP_201_CREATED
)
return JsonResponse(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
@swagger_auto_schema(
methods=["put"],
request_body=ScriptSerializer,
responses={
200: openapi.Response(
"Updates a script. Allows for partial updates.", ScriptSerializer
)
},
tags=["Scripts"],
)
@swagger_auto_schema(
methods=["delete"], responses={204: "No content"}, tags=["Scripts"]
)
@api_view(["PUT", "DELETE"])
@permission_classes([permissions.IsAuthenticated])
def project_script_detail(request, project_uuid, script_uuid):
project = get_object_or_404(Project, pk=project_uuid)
script = get_object_or_404(Script, pk=script_uuid)
check_if_member_of_project(request.user.id, project.uuid)
if script.project != project:
return JsonResponse({}, status=status.HTTP_400_BAD_REQUEST)
elif request.method == "PUT":
check_if_admin_of_project(request.user.id, project.uuid)
data = JSONParser().parse(request)
serializer = ScriptSerializer(script, data=data)
if serializer.is_valid():
serializer.save()
return JsonResponse(serializer.data)
return JsonResponse(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
elif request.method == "DELETE":
check_if_admin_of_project(request.user.id, project.uuid)
script.delete()
return JsonResponse({}, status=status.HTTP_204_NO_CONTENT)
| [
[
[
24,
28
],
[
13414,
13418
]
],
[
[
53,
65
],
[
11924,
11936
],
[
12368,
12380
],
[
13502,
13514
],
[
13889,
13901
],
[
14031,
14043
]
],
[
[
67,
79
],
[
1796,
1808
],
[
2158,
2170
],
[
2308,
2320
],
[
2888,
2900
],
[
3923,
3935
],
[
4402,
4414
],
[
4791,
4803
],
[
4836,
4848
],
[
5047,
5059
],
[
5873,
5885
],
[
6267,
6279
],
[
6397,
6409
],
[
7304,
7316
],
[
7447,
7459
],
[
7756,
7768
],
[
7801,
7813
],
[
8009,
8021
],
[
8989,
9001
],
[
9062,
9074
],
[
10282,
10294
],
[
10455,
10467
],
[
10794,
10806
],
[
10839,
10851
],
[
11059,
11071
],
[
12323,
12335
],
[
12581,
12593
],
[
13844,
13856
],
[
14829,
14841
],
[
15326,
15338
],
[
15379,
15391
],
[
16166,
16178
],
[
16865,
16877
],
[
16985,
16997
],
[
17763,
17775
],
[
18099,
18111
],
[
18144,
18156
],
[
18354,
18366
]
],
[
[
109,
126
],
[
3582,
3599
],
[
7101,
7118
],
[
7156,
7173
],
[
10011,
10028
],
[
10078,
10095
],
[
11665,
11682
],
[
13103,
13120
],
[
17552,
17569
],
[
17609,
17626
]
],
[
[
148,
155
],
[
1130,
1137
],
[
1376,
1383
],
[
2441,
2448
],
[
2992,
2999
],
[
3201,
3208
],
[
5173,
5180
],
[
5411,
5418
],
[
6530,
6537
],
[
6722,
6729
],
[
8187,
8194
],
[
9204,
9211
],
[
9514,
9521
],
[
11231,
11238
],
[
12696,
12703
],
[
12758,
12765
],
[
12825,
12832
],
[
14187,
14194
],
[
14258,
14265
],
[
14325,
14332
],
[
15662,
15669
],
[
16309,
16316
],
[
17162,
17169
]
],
[
[
183,
202
],
[
1059,
1078
],
[
1277,
1296
],
[
2379,
2398
],
[
2921,
2940
],
[
3094,
3113
],
[
3342,
3361
],
[
5102,
5121
],
[
5315,
5334
],
[
6468,
6487
],
[
6618,
6637
],
[
6854,
6873
],
[
8064,
8083
],
[
9133,
9152
],
[
9392,
9411
],
[
9715,
9734
],
[
11114,
11133
],
[
11409,
11428
],
[
12636,
12655
],
[
14127,
14146
],
[
15591,
15610
],
[
16211,
16230
],
[
17056,
17075
],
[
17300,
17319
]
],
[
[
230,
241
],
[
1518,
1529
],
[
2548,
2559
],
[
3495,
3506
],
[
5544,
5555
],
[
6998,
7009
],
[
8409,
8420
],
[
9884,
9895
],
[
11562,
11573
],
[
13015,
13026
],
[
15891,
15902
],
[
16441,
16452
],
[
17445,
17456
]
],
[
[
243,
249
],
[
2254,
2260
],
[
2347,
2353
],
[
4875,
4881
],
[
5071,
5077
],
[
6344,
6350
],
[
6436,
6442
],
[
7328,
7334
],
[
7840,
7846
],
[
8033,
8039
],
[
9026,
9032
],
[
9101,
9107
],
[
10306,
10312
],
[
10878,
10884
],
[
11083,
11089
],
[
11988,
11994
],
[
12457,
12463
],
[
12605,
12611
],
[
13573,
13579
],
[
13981,
13987
],
[
14090,
14096
],
[
15037,
15043
],
[
15553,
15559
],
[
16940,
16946
],
[
17024,
17030
],
[
17787,
17793
],
[
18183,
18189
],
[
18378,
18384
]
],
[
[
288,
296
],
[
1471,
1479
],
[
2509,
2517
],
[
3439,
3447
],
[
5497,
5505
],
[
6942,
6950
],
[
8369,
8377
],
[
9828,
9836
],
[
11513,
11521
],
[
12975,
12983
],
[
14555,
14563
],
[
15852,
15860
],
[
16401,
16409
],
[
17396,
17404
]
],
[
[
298,
316
],
[
1498,
1516
],
[
2528,
2546
],
[
3475,
3493
],
[
5524,
5542
],
[
6978,
6996
],
[
8389,
8407
],
[
9864,
9882
],
[
11542,
11560
],
[
12995,
13013
],
[
15871,
15889
],
[
16421,
16439
],
[
17425,
17443
]
],
[
[
352,
362
],
[
1888,
1898
],
[
4547,
4557
],
[
6030,
6040
],
[
7592,
7602
],
[
8621,
8631
],
[
10600,
10610
],
[
12072,
12082
],
[
13220,
13230
],
[
14635,
14645
],
[
16640,
16650
],
[
17931,
17941
]
],
[
[
395,
410
],
[
14793,
14808
]
],
[
[
445,
449
],
[
6158,
6162
],
[
7174,
7178
]
],
[
[
455,
462
],
[
996,
1003
],
[
2060,
2067
],
[
3600,
3607
],
[
5634,
5641
],
[
7119,
7126
],
[
8510,
8517
],
[
10029,
10036
],
[
11683,
11690
],
[
13121,
13128
],
[
13639,
13646
],
[
16529,
16536
],
[
17570,
17577
]
],
[
[
468,
485
],
[
11792,
11809
],
[
13289,
13306
]
],
[
[
491,
513
],
[
8766,
8788
],
[
10105,
10127
]
],
[
[
519,
543
],
[
15117,
15141
],
[
15995,
16019
]
],
[
[
549,
555
],
[
16760,
16766
],
[
17627,
17633
]
],
[
[
598,
612
],
[
5249,
5263
],
[
5375,
5389
],
[
5456,
5470
],
[
6577,
6591
],
[
6677,
6691
],
[
6799,
6813
],
[
5825,
5839
],
[
6079,
6093
],
[
7411,
7425
],
[
7641,
7655
]
],
[
[
618,
635
],
[
1207,
1224
],
[
1337,
1354
],
[
1424,
1441
],
[
2462,
2479
],
[
3042,
3059
],
[
3153,
3170
],
[
3281,
3298
],
[
12907,
12924
],
[
1682,
1699
],
[
1937,
1954
],
[
2789,
2806
],
[
3809,
3826
],
[
3974,
3991
],
[
4596,
4613
],
[
13798,
13815
]
],
[
[
641,
668
],
[
11173,
11200
],
[
11330,
11357
],
[
12252,
12279
]
],
[
[
674,
706
],
[
8124,
8156
],
[
8276,
8308
],
[
9299,
9331
],
[
9451,
9483
],
[
9622,
9654
],
[
8666,
8698
],
[
8924,
8956
],
[
10389,
10421
],
[
10649,
10681
]
],
[
[
712,
745
],
[
14461,
14494
],
[
15758,
15791
],
[
15216,
15249
],
[
16068,
16101
]
],
[
[
751,
767
],
[
16271,
16287
],
[
16356,
16372
],
[
17115,
17131
],
[
17241,
17257
],
[
16685,
16701
],
[
17980,
17996
]
],
[
[
810,
836
],
[
3630,
3656
],
[
5677,
5703
],
[
7198,
7224
],
[
10164,
10190
],
[
17655,
17681
]
],
[
[
842,
867
],
[
4475,
4500
],
[
4950,
4975
],
[
5958,
5983
],
[
7520,
7545
],
[
7915,
7940
],
[
8553,
8578
],
[
10528,
10553
],
[
10953,
10978
],
[
11713,
11738
],
[
13151,
13176
],
[
16572,
16597
],
[
17859,
17884
],
[
18258,
18283
]
],
[
[
873,
892
],
[
3732,
3751
]
],
[
[
922,
950
],
[
14723,
14751
]
],
[
[
957,
974
],
[
1626,
1643
],
[
2737,
2754
]
],
[
[
1552,
1564
]
],
[
[
2582,
2595
]
],
[
[
3529,
3543
]
],
[
[
5578,
5595
]
],
[
[
7032,
7051
]
],
[
[
8443,
8471
]
],
[
[
9918,
9949
]
],
[
[
11596,
11617
]
],
[
[
13049,
13064
]
],
[
[
14578,
14613
]
],
[
[
15925,
15951
]
],
[
[
16475,
16490
]
],
[
[
17479,
17500
]
]
] |
"""Provides the Objector class."""
from json import loads
from typing import TYPE_CHECKING, Any, Dict, List, Optional, Union
from .exceptions import ClientException, RedditAPIException
from .models.reddit.base import RedditBase
from .util import snake_case_keys
if TYPE_CHECKING: # pragma: no cover
from ... import praw
class Objector:
"""The objector builds :class:`.RedditBase` objects."""
@classmethod
def parse_error(
cls, data: Union[List[Any], Dict[str, Dict[str, str]]]
) -> Optional[RedditAPIException]:
"""Convert JSON response into an error object.
:param data: The dict to be converted.
:returns: An instance of :class:`~.RedditAPIException`, or ``None`` if ``data``
doesn't fit this model.
"""
if isinstance(data, list):
# Fetching a Submission returns a list (of two items). Although it's handled
# manually in `Submission._fetch()`, assume it's a possibility here.
return None
errors = data.get("json", {}).get("errors")
if errors is None:
return None
if len(errors) < 1:
# See `Collection._fetch()`.
raise ClientException("successful error response", data)
return RedditAPIException(errors)
@classmethod
def check_error(cls, data: Union[List[Any], Dict[str, Dict[str, str]]]):
"""Raise an error if the argument resolves to an error object."""
error = cls.parse_error(data)
if error:
raise error
def __init__(self, reddit: "praw.Reddit", parsers: Optional[Dict[str, Any]] = None):
"""Initialize an Objector instance.
:param reddit: An instance of :class:`~.Reddit`.
"""
self.parsers = {} if parsers is None else parsers
self._reddit = reddit
def _objectify_dict(self, data):
"""Create RedditBase objects from dicts.
:param data: The structured data, assumed to be a dict.
:returns: An instance of :class:`~.RedditBase`.
"""
if {"conversation", "messages", "modActions"}.issubset(data):
parser = self.parsers["ModmailConversation"]
elif {"actionTypeId", "author", "date"}.issubset(data):
# Modmail mod action
data = snake_case_keys(data)
parser = self.parsers["ModmailAction"]
elif {"bodyMarkdown", "isInternal"}.issubset(data):
# Modmail message
data = snake_case_keys(data)
parser = self.parsers["ModmailMessage"]
elif {"kind", "short_name", "violation_reason"}.issubset(data):
# This is a Rule
parser = self.parsers["rule"]
elif {"isAdmin", "isDeleted"}.issubset(data):
# Modmail author
data = snake_case_keys(data)
# Prevent clobbering base-36 id
del data["id"]
data["is_subreddit_mod"] = data.pop("is_mod")
parser = self.parsers[self._reddit.config.kinds["redditor"]]
elif {"banStatus", "muteStatus", "recentComments"}.issubset(data):
# Modmail user
data = snake_case_keys(data)
data["created_string"] = data.pop("created")
parser = self.parsers[self._reddit.config.kinds["redditor"]]
elif {"displayName", "id", "type"}.issubset(data):
# Modmail subreddit
data = snake_case_keys(data)
parser = self.parsers[self._reddit.config.kinds[data["type"]]]
elif {"date", "id", "name"}.issubset(data) or {
"id",
"name",
"permissions",
}.issubset(data):
parser = self.parsers[self._reddit.config.kinds["redditor"]]
elif {"text", "url"}.issubset(data):
if "color" in data or "linkUrl" in data:
parser = self.parsers["Button"]
else:
parser = self.parsers["MenuLink"]
elif {"children", "text"}.issubset(data):
parser = self.parsers["Submenu"]
elif {"height", "url", "width"}.issubset(data):
parser = self.parsers["Image"]
elif {"isSubscribed", "name", "subscribers"}.issubset(data):
# discards icon and subscribed information
return self._reddit.subreddit(data["name"])
elif {"authorFlairType", "name"}.issubset(data):
# discards flair information
return self._reddit.redditor(data["name"])
elif {"parent_id"}.issubset(data):
parser = self.parsers[self._reddit.config.kinds["comment"]]
elif "collection_id" in data.keys():
parser = self.parsers["Collection"]
elif {"moderators", "moderatorIds", "allUsersLoaded", "subredditId"}.issubset(
data
):
data = snake_case_keys(data)
moderators = []
for mod_id in data["moderator_ids"]:
mod = snake_case_keys(data["moderators"][mod_id])
mod["mod_permissions"] = list(mod["mod_permissions"].keys())
moderators.append(mod)
data["moderators"] = moderators
parser = self.parsers["moderator-list"]
elif "username" in data.keys():
data["name"] = data.pop("username")
parser = self.parsers[self._reddit.config.kinds["redditor"]]
else:
if "user" in data:
parser = self.parsers[self._reddit.config.kinds["redditor"]]
data["user"] = parser.parse({"name": data["user"]}, self._reddit)
return data
return parser.parse(data, self._reddit)
def objectify(
self, data: Optional[Union[Dict[str, Any], List[Any]]]
) -> Optional[Union[RedditBase, Dict[str, Any], List[Any]]]:
"""Create RedditBase objects from data.
:param data: The structured data.
:returns: An instance of :class:`~.RedditBase`, or ``None`` if given ``data`` is
``None``.
"""
# pylint: disable=too-many-return-statements
if data is None: # 204 no content
return None
if isinstance(data, list):
return [self.objectify(item) for item in data]
if "json" in data and "errors" in data["json"]:
errors = data["json"]["errors"]
if len(errors) > 0:
raise RedditAPIException(errors)
if "kind" in data and (
"shortName" in data or data["kind"] in ("menu", "moderators")
):
# This is a widget
parser = self.parsers.get(data["kind"], self.parsers["widget"])
return parser.parse(data, self._reddit)
if {"kind", "data"}.issubset(data) and data["kind"] in self.parsers:
parser = self.parsers[data["kind"]]
return parser.parse(data["data"], self._reddit)
if "json" in data and "data" in data["json"]:
if "websocket_url" in data["json"]["data"]:
return data
if "things" in data["json"]["data"]: # Submission.reply
return self.objectify(data["json"]["data"]["things"])
if "rules" in data["json"]["data"]:
return self.objectify(loads(data["json"]["data"]["rules"]))
if "url" in data["json"]["data"]: # Subreddit.submit
# The URL is the URL to the submission, so it's removed.
del data["json"]["data"]["url"]
parser = self.parsers[self._reddit.config.kinds["submission"]]
if data["json"]["data"]["id"].startswith(
f"{self._reddit.config.kinds['submission']}_"
):
# With polls, Reddit returns a fullname but calls it an "id". This
# fixes this by coercing the fullname into an id.
data["json"]["data"]["id"] = data["json"]["data"]["id"].split(
"_", 1
)[1]
else:
parser = self.parsers["LiveUpdateEvent"]
return parser.parse(data["json"]["data"], self._reddit)
if "rules" in data:
return self.objectify(data["rules"])
elif isinstance(data, dict):
return self._objectify_dict(data)
return data
| [
[
[
53,
58
],
[
7213,
7218
]
],
[
[
78,
91
],
[
268,
281
]
],
[
[
93,
96
],
[
475,
478
],
[
1361,
1364
],
[
1625,
1628
],
[
5759,
5762
],
[
5770,
5773
],
[
5695,
5698
],
[
5706,
5709
]
],
[
[
98,
102
],
[
481,
485
],
[
491,
495
],
[
1367,
1371
],
[
1377,
1381
],
[
1615,
1619
],
[
5749,
5753
],
[
5685,
5689
]
],
[
[
104,
108
],
[
470,
474
],
[
1356,
1360
],
[
5765,
5769
],
[
5701,
5705
]
],
[
[
110,
118
],
[
517,
525
],
[
1606,
1614
],
[
5722,
5730
],
[
5670,
5678
]
],
[
[
120,
125
],
[
464,
469
],
[
1350,
1355
],
[
5731,
5736
],
[
5679,
5684
]
],
[
[
151,
166
],
[
1208,
1223
]
],
[
[
168,
186
],
[
526,
544
],
[
1274,
1292
],
[
6362,
6380
]
],
[
[
219,
229
],
[
5737,
5747
]
],
[
[
248,
263
],
[
2308,
2323
],
[
2490,
2505
],
[
2809,
2824
],
[
3154,
3169
],
[
3416,
3431
],
[
4816,
4831
],
[
4937,
4952
]
],
[
[
323,
327
]
],
[
[
336,
344
]
]
] |
from .core import *
SCHEMA_VERSION = 'v2.6.5'
SCHEMA_URL = 'https://vega.github.io/schema/vega/v2.6.5.json'
| [
[
[
18,
19
]
],
[
[
21,
35
]
],
[
[
47,
57
]
]
] |
"""Various input/output utility functions"""
from typing import Any, Optional
import os
import re
from io import BytesIO
import cloudpickle
import pandas as pd
from zstandard import ZstdCompressor, ZstdDecompressor
COMPRESSION_MAX_OUTPUT_SIZE = 10 ** 9 # 1GB
def pickle_dumps(variable: object) -> bytes:
pickle: bytes = cloudpickle.dumps(variable)
return pickle
def pickle_loads(dumped_pickle: bytes) -> Any:
return cloudpickle.loads(dumped_pickle)
def save_df(df: pd.DataFrame, format: str = "csv") -> bytes:
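# Crude version check: strip non-digits from pd.__version__, e.g. "1.2.0" -> 120.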
pandas_version: int = int(re.sub("[^0-9]", "", pd.__version__))
if format == "csv":
csv_buffer = BytesIO()
if pandas_version >= 120:
df.to_csv(csv_buffer, index=False)
else:
csv_buffer.write(df.to_csv(index=False).encode("utf-8"))
csv_buffer.seek(0)
return csv_buffer.getvalue()
else:
raise ValueError("Invalid method: {method}. Choose 'csv'.")
def compress(data: bytes, method: Optional[str] = "zstd") -> bytes:
if method == "zstd":
compressor = ZstdCompressor(level=3, write_checksum=True)
compressed_data = compressor.compress(data)
elif method is None:
compressed_data = data
# elif compression == "lz4":
# import lz4.frame
# data = lz4.frame.compress(data, compression_level=3, content_checksum=True)
else:
raise ValueError("Invalid compression method: {method}. Choose 'zstd' or None.")
return compressed_data
def decompress(
data: bytes, method: Optional[str] = "zstd", max_output_size: int = COMPRESSION_MAX_OUTPUT_SIZE
) -> bytes:
if method == "zstd":
decompressor = ZstdDecompressor()
decompressed_data = decompressor.decompress(data, max_output_size=max_output_size)
elif method is None:
decompressed_data = data
else:
raise ValueError("Invalid compression method: {method}. Choose 'zstd' or None.")
return decompressed_data
| [
[
[
65,
68
],
[
421,
424
]
],
[
[
70,
78
],
[
1002,
1010
],
[
1551,
1559
]
],
[
[
87,
89
]
],
[
[
97,
99
],
[
563,
565
]
],
[
[
115,
122
],
[
646,
653
]
],
[
[
131,
142
],
[
331,
342
],
[
437,
448
]
],
[
[
150,
162
],
[
488,
490
],
[
584,
586
]
],
[
[
185,
199
],
[
1082,
1096
]
],
[
[
201,
217
],
[
1686,
1702
]
],
[
[
219,
246
],
[
1598,
1625
]
],
[
[
270,
282
]
],
[
[
383,
395
]
],
[
[
476,
483
]
],
[
[
972,
980
]
],
[
[
1514,
1524
]
]
] |
# coding: utf-8
from __future__ import unicode_literals
from io import StringIO, BytesIO
from pathlib import Path
import pytest
from .util import load_test_model
from ..tokens import Doc
from ..strings import StringStore
from .. import util
# These languages are used for generic tokenizer tests – only add a language
# here if it's using spaCy's tokenizer (not a different library)
# TODO: re-implement generic tokenizer tests
_languages = ['bn', 'da', 'de', 'en', 'es', 'fi', 'fr', 'ga', 'he', 'hu', 'id',
'it', 'nb', 'nl', 'pl', 'pt', 'ro', 'ru', 'sv', 'tr', 'ar', 'xx']
_models = {'en': ['en_core_web_sm'],
'de': ['de_core_news_sm'],
'fr': ['fr_core_news_sm'],
'xx': ['xx_ent_web_sm'],
'en_core_web_md': ['en_core_web_md'],
'es_core_news_md': ['es_core_news_md']}
# only used for tests that require loading the models
# in all other cases, use specific instances
@pytest.fixture(params=_models['en'])
def EN(request):
return load_test_model(request.param)
@pytest.fixture(params=_models['de'])
def DE(request):
return load_test_model(request.param)
@pytest.fixture(params=_models['fr'])
def FR(request):
return load_test_model(request.param)
@pytest.fixture()
def RU(request):
pymorphy = pytest.importorskip('pymorphy2')
return util.get_lang_class('ru')()
#@pytest.fixture(params=_languages)
#def tokenizer(request):
#lang = util.get_lang_class(request.param)
#return lang.Defaults.create_tokenizer()
@pytest.fixture
def tokenizer():
return util.get_lang_class('xx').Defaults.create_tokenizer()
@pytest.fixture
def en_tokenizer():
return util.get_lang_class('en').Defaults.create_tokenizer()
@pytest.fixture
def en_vocab():
return util.get_lang_class('en').Defaults.create_vocab()
@pytest.fixture
def en_parser(en_vocab):
nlp = util.get_lang_class('en')(en_vocab)
return nlp.create_pipe('parser')
@pytest.fixture
def es_tokenizer():
return util.get_lang_class('es').Defaults.create_tokenizer()
@pytest.fixture
def de_tokenizer():
return util.get_lang_class('de').Defaults.create_tokenizer()
@pytest.fixture
def fr_tokenizer():
return util.get_lang_class('fr').Defaults.create_tokenizer()
@pytest.fixture
def hu_tokenizer():
return util.get_lang_class('hu').Defaults.create_tokenizer()
@pytest.fixture
def fi_tokenizer():
return util.get_lang_class('fi').Defaults.create_tokenizer()
@pytest.fixture
def ro_tokenizer():
return util.get_lang_class('ro').Defaults.create_tokenizer()
@pytest.fixture
def id_tokenizer():
return util.get_lang_class('id').Defaults.create_tokenizer()
@pytest.fixture
def sv_tokenizer():
return util.get_lang_class('sv').Defaults.create_tokenizer()
@pytest.fixture
def bn_tokenizer():
return util.get_lang_class('bn').Defaults.create_tokenizer()
@pytest.fixture
def ga_tokenizer():
return util.get_lang_class('ga').Defaults.create_tokenizer()
@pytest.fixture
def he_tokenizer():
return util.get_lang_class('he').Defaults.create_tokenizer()
@pytest.fixture
def nb_tokenizer():
return util.get_lang_class('nb').Defaults.create_tokenizer()
@pytest.fixture
def da_tokenizer():
return util.get_lang_class('da').Defaults.create_tokenizer()
@pytest.fixture
def ja_tokenizer():
janome = pytest.importorskip("MeCab")
return util.get_lang_class('ja').Defaults.create_tokenizer()
@pytest.fixture
def th_tokenizer():
pythainlp = pytest.importorskip("pythainlp")
return util.get_lang_class('th').Defaults.create_tokenizer()
@pytest.fixture
def tr_tokenizer():
return util.get_lang_class('tr').Defaults.create_tokenizer()
@pytest.fixture
def ar_tokenizer():
return util.get_lang_class('ar').Defaults.create_tokenizer()
@pytest.fixture
def ru_tokenizer():
pymorphy = pytest.importorskip('pymorphy2')
return util.get_lang_class('ru').Defaults.create_tokenizer()
@pytest.fixture
def stringstore():
return StringStore()
@pytest.fixture
def en_entityrecognizer():
return util.get_lang_class('en').Defaults.create_entity()
@pytest.fixture
def text_file():
return StringIO()
@pytest.fixture
def text_file_b():
return BytesIO()
def pytest_addoption(parser):
parser.addoption("--models", action="store_true",
help="include tests that require full models")
parser.addoption("--vectors", action="store_true",
help="include word vectors tests")
parser.addoption("--slow", action="store_true",
help="include slow tests")
for lang in _languages + ['all']:
parser.addoption("--%s" % lang, action="store_true", help="Use %s models" % lang)
for model in _models:
if model not in _languages:
parser.addoption("--%s" % model, action="store_true", help="Use %s model" % model)
def pytest_runtest_setup(item):
for opt in ['models', 'vectors', 'slow']:
if opt in item.keywords and not item.config.getoption("--%s" % opt):
pytest.skip("need --%s option to run" % opt)
# Check if test is marked with models and has arguments set, i.e. specific
# language. If so, skip test if flag not set.
if item.get_marker('models'):
for arg in item.get_marker('models').args:
if not item.config.getoption("--%s" % arg) and not item.config.getoption("--all"):
pytest.skip("need --%s or --all option to run" % arg)
| [
[
[
39,
55
]
],
[
[
72,
80
],
[
4136,
4144
]
],
[
[
82,
89
],
[
4195,
4202
]
],
[
[
110,
114
]
],
[
[
122,
128
],
[
945,
951
],
[
1044,
1050
],
[
1143,
1149
],
[
1242,
1248
],
[
1513,
1519
],
[
1613,
1619
],
[
1716,
1722
],
[
1811,
1817
],
[
1937,
1943
],
[
2040,
2046
],
[
2143,
2149
],
[
2246,
2252
],
[
2349,
2355
],
[
2452,
2458
],
[
2555,
2561
],
[
2658,
2664
],
[
2761,
2767
],
[
2864,
2870
],
[
2967,
2973
],
[
3070,
3076
],
[
3172,
3178
],
[
3274,
3280
],
[
3418,
3424
],
[
3569,
3575
],
[
3671,
3677
],
[
3773,
3779
],
[
3924,
3930
],
[
3986,
3992
],
[
4093,
4099
],
[
4150,
4156
],
[
1291,
1297
],
[
3322,
3328
],
[
3469,
3475
],
[
3823,
3829
],
[
5025,
5031
],
[
5396,
5402
]
],
[
[
148,
163
],
[
1010,
1025
],
[
1109,
1124
],
[
1208,
1223
]
],
[
[
185,
188
]
],
[
[
211,
222
],
[
3969,
3980
]
],
[
[
238,
242
],
[
1335,
1339
],
[
1556,
1560
],
[
1659,
1663
],
[
1758,
1762
],
[
1861,
1865
],
[
1983,
1987
],
[
2086,
2090
],
[
2189,
2193
],
[
2292,
2296
],
[
2395,
2399
],
[
2498,
2502
],
[
2601,
2605
],
[
2704,
2708
],
[
2807,
2811
],
[
2910,
2914
],
[
3013,
3017
],
[
3116,
3120
],
[
3218,
3222
],
[
3362,
3366
],
[
3513,
3517
],
[
3615,
3619
],
[
3717,
3721
],
[
3867,
3871
],
[
4039,
4043
]
],
[
[
432,
442
],
[
4587,
4597
],
[
4749,
4759
]
],
[
[
593,
600
],
[
967,
974
],
[
1066,
1073
],
[
1165,
1172
],
[
4716,
4723
]
],
[
[
986,
988
]
],
[
[
1085,
1087
]
],
[
[
1184,
1186
]
],
[
[
1263,
1265
]
],
[
[
1532,
1541
]
],
[
[
1632,
1644
]
],
[
[
1735,
1743
]
],
[
[
1830,
1839
]
],
[
[
1956,
1968
]
],
[
[
2059,
2071
]
],
[
[
2162,
2174
]
],
[
[
2265,
2277
]
],
[
[
2368,
2380
]
],
[
[
2471,
2483
]
],
[
[
2574,
2586
]
],
[
[
2677,
2689
]
],
[
[
2780,
2792
]
],
[
[
2883,
2895
]
],
[
[
2986,
2998
]
],
[
[
3089,
3101
]
],
[
[
3191,
3203
]
],
[
[
3293,
3305
]
],
[
[
3437,
3449
]
],
[
[
3588,
3600
]
],
[
[
3690,
3702
]
],
[
[
3792,
3804
]
],
[
[
3943,
3954
]
],
[
[
4005,
4024
]
],
[
[
4112,
4121
]
],
[
[
4169,
4180
]
],
[
[
4211,
4227
]
],
[
[
4862,
4882
]
]
] |
from loss.BCELoss import cal_bce_loss
from loss.HEL import cal_hel_loss
from loss.IOULoss import cal_iou_loss, cal_weighted_iou_loss
from loss.L12Loss import cal_mae_loss, cal_mse_loss
from loss.SSIM import cal_ssim_loss
supported_loss = dict(
bce=cal_bce_loss,
hel=cal_hel_loss,
iou=cal_iou_loss,
weighted_iou=cal_weighted_iou_loss,
mae=cal_mae_loss,
mse=cal_mse_loss,
ssim=cal_ssim_loss,
)
def get_loss_combination_with_cfg(loss_cfg: dict) -> dict:
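# loss_cfg maps a loss name to a bool; keep only the enabled losses that this module supports.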
loss_combination = {}
for loss_name, with_loss in loss_cfg.items():
if with_loss:
if loss_func := supported_loss.get(loss_name):
loss_combination[loss_name] = loss_func
else:
raise Exception(f"{loss_name} is not be supported!")
return loss_combination
| [
[
[
25,
37
],
[
253,
265
]
],
[
[
59,
71
],
[
275,
287
]
],
[
[
97,
109
],
[
297,
309
]
],
[
[
111,
132
],
[
328,
349
]
],
[
[
158,
170
],
[
359,
371
]
],
[
[
172,
184
],
[
381,
393
]
],
[
[
207,
220
],
[
404,
417
]
],
[
[
222,
236
],
[
608,
622
]
],
[
[
427,
456
]
]
] |
#!/usr/bin/env python3
#https://codeforces.com/group/H9K9zY8tcT/contest/297258/problem/B
#heap?
from queue import PriorityQueue
n = int(input())
g = {}
c = {str(i):0 for i in range(1,n+1)} #children count
for i in range(1,n+1):
k = str(i)
g[k] = input().split() # l[0]=weight; l[1]=no use; l[2:] parents;
for p in g[k][2:]:
c[p] += 1
q = PriorityQueue()
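# seed the priority queue with leaf nodes (no children), ordered by weight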
[q.put((int(g[k][0]),k)) for k in c if c[k]==0]
m = 0
i = n-1
while not q.empty():
w,k = q.get()
l = i + w
i -= 1
if l>m:
m = l
for p in g[k][2:]:
c[p] -= 1
if c[p]==0:
q.put((int(g[p][0]),p))
print(m)
| [
[
[
115,
128
],
[
366,
379
]
],
[
[
130,
131
],
[
185,
186
],
[
227,
228
],
[
440,
441
]
],
[
[
147,
148
],
[
255,
256
],
[
334,
335
],
[
394,
395
],
[
550,
551
],
[
621,
622
]
],
[
[
154,
155
],
[
352,
353
],
[
416,
417
],
[
421,
422
],
[
568,
569
],
[
589,
590
]
],
[
[
214,
215
],
[
248,
249
]
],
[
[
237,
238
],
[
257,
258
],
[
336,
337
]
],
[
[
329,
330
],
[
354,
355
]
],
[
[
362,
363
],
[
383,
384
],
[
454,
455
],
[
475,
476
],
[
610,
611
]
],
[
[
430,
431
],
[
520,
521
],
[
640,
641
]
],
[
[
436,
437
],
[
493,
494
],
[
503,
504
]
],
[
[
469,
470
],
[
497,
498
]
],
[
[
471,
472
],
[
552,
553
]
],
[
[
487,
488
],
[
518,
519
],
[
535,
536
]
],
[
[
531,
532
],
[
520,
521
],
[
640,
641
]
],
[
[
545,
546
],
[
570,
571
],
[
591,
592
],
[
623,
624
],
[
630,
631
]
]
] |
from django.forms import ModelForm
from .models import MRIScan
class MRIScanForm(ModelForm):
class Meta:
model = MRIScan
fields = ['case_id', 't1', 't1ce', 't2', 'flair'] | [
[
[
25,
34
],
[
82,
91
]
],
[
[
55,
62
],
[
126,
133
]
],
[
[
70,
81
]
]
] |
# coding=utf-8
# Copyright 2020 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for task_set.tasks.fixed_text_rnn_classification."""
from absl.testing import parameterized
from task_set import registry
from task_set.tasks import family_test_utils
from task_set.tasks.fixed import fixed_text_rnn_classification # pylint: disable=unused-import
import tensorflow.compat.v1 as tf
class FixedTextRNNClassificationTest(family_test_utils.SingleTaskTestCase):
def test_right_number_of_tasks(self):
task_names = registry.task_registry.get_all_fixed_config_names()
self.assertLen(task_names, 12)
@parameterized.parameters(registry.task_registry.get_all_fixed_config_names())
def test_tasks(self, task_name):
self.task_test(registry.task_registry.get_instance(task_name))
if __name__ == "__main__":
tf.test.main()
| [
[
[
695,
708
],
[
1142,
1155
]
],
[
[
731,
739
],
[
1167,
1175
],
[
1051,
1059
],
[
1274,
1282
]
],
[
[
767,
784
],
[
954,
971
]
],
[
[
818,
847
]
],
[
[
888,
914
],
[
1353,
1355
]
],
[
[
923,
953
]
]
] |
# get hsv values using trackbar
import cv2
import numpy as np
import time
# A required callback method that goes into the trackbar function.
def nothing(x):
pass
# Initializing the webcam feed.
cap = cv2.VideoCapture(0)
cap.set(3,1280)
cap.set(4,720)
# Create a window named trackbars.
cv2.namedWindow("Trackbars")
# Now create 6 trackbars that will control the lower and upper range of
# H,S and V channels. The Arguments are like this: Name of trackbar,
# window name, range, callback function. For Hue the range is 0-179 and
# for S,V its 0-255.
cv2.createTrackbar("L - H", "Trackbars", 0, 179, nothing)
cv2.createTrackbar("L - S", "Trackbars", 0, 255, nothing)
cv2.createTrackbar("L - V", "Trackbars", 0, 255, nothing)
cv2.createTrackbar("U - H", "Trackbars", 179, 179, nothing)
cv2.createTrackbar("U - S", "Trackbars", 255, 255, nothing)
cv2.createTrackbar("U - V", "Trackbars", 255, 255, nothing)
while True:
# Start reading the webcam feed frame by frame.
ret, frame = cap.read()
if not ret:
break
# Flip the frame horizontally (Not required)
frame = cv2.flip( frame, 1 )
# Convert the BGR image to HSV image.
hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
# Get the new values of the trackbar in real time as the user changes
# them
l_h = cv2.getTrackbarPos("L - H", "Trackbars")
l_s = cv2.getTrackbarPos("L - S", "Trackbars")
l_v = cv2.getTrackbarPos("L - V", "Trackbars")
u_h = cv2.getTrackbarPos("U - H", "Trackbars")
u_s = cv2.getTrackbarPos("U - S", "Trackbars")
u_v = cv2.getTrackbarPos("U - V", "Trackbars")
# Set the lower and upper HSV range according to the value selected
# by the trackbar
lower_range = np.array([l_h, l_s, l_v])
upper_range = np.array([u_h, u_s, u_v])
# Filter the image and get the binary mask, where white represents
# your target color
mask = cv2.inRange(hsv, lower_range, upper_range)
# You can also visualize the real part of the target color (Optional)
res = cv2.bitwise_and(frame, frame, mask=mask)
# Converting the binary mask to 3 channel image, this is just so
# we can stack it with the others
mask_3 = cv2.cvtColor(mask, cv2.COLOR_GRAY2BGR)
# stack the mask, orginal frame and the filtered result
stacked = np.hstack((mask_3,frame,res))
# Show this stacked frame at 40% of the size.
cv2.imshow('Trackbars',cv2.resize(stacked,None,fx=0.4,fy=0.4))
# If the user presses ESC then exit the program
key = cv2.waitKey(1)
if key == 27:
break
# If the user presses `s` then print this array.
if key == ord('s'):
thearray = [[l_h,l_s,l_v],[u_h, u_s, u_v]]
print(thearray)
# Also save this array as penval.npy
np.save('penval',thearray)
break
# Release the camera & destroy the windows.
cap.release()
cv2.destroyAllWindows() | [
[
[
39,
42
],
[
206,
209
],
[
293,
296
],
[
558,
561
],
[
616,
619
],
[
674,
677
],
[
732,
735
],
[
792,
795
],
[
852,
855
],
[
1104,
1107
],
[
1183,
1186
],
[
1203,
1206
],
[
1323,
1326
],
[
1374,
1377
],
[
1425,
1428
],
[
1476,
1479
],
[
1527,
1530
],
[
1578,
1581
],
[
1915,
1918
],
[
2044,
2047
],
[
2211,
2214
],
[
2230,
2233
],
[
2418,
2421
],
[
2441,
2444
],
[
2548,
2551
],
[
2931,
2934
]
],
[
[
50,
61
],
[
1733,
1735
],
[
1777,
1779
],
[
2329,
2331
],
[
2823,
2825
]
],
[
[
69,
73
]
],
[
[
146,
153
],
[
607,
614
],
[
665,
672
],
[
723,
730
],
[
783,
790
],
[
843,
850
],
[
903,
910
]
],
[
[
200,
203
],
[
226,
229
],
[
242,
245
],
[
1002,
1005
],
[
2917,
2920
]
],
[
[
989,
992
],
[
1024,
1027
]
],
[
[
994,
999
],
[
1114,
1119
]
],
[
[
1096,
1101
],
[
1196,
1201
],
[
2060,
2065
],
[
2067,
2072
],
[
2347,
2352
]
],
[
[
1177,
1180
],
[
1927,
1930
]
],
[
[
1317,
1320
],
[
1743,
1746
],
[
2707,
2710
]
],
[
[
1368,
1371
],
[
1748,
1751
],
[
2711,
2714
]
],
[
[
1419,
1422
],
[
1753,
1756
],
[
2715,
2718
]
],
[
[
1470,
1473
],
[
1787,
1790
],
[
2721,
2724
]
],
[
[
1521,
1524
],
[
1792,
1795
],
[
2726,
2729
]
],
[
[
1572,
1575
],
[
1797,
1800
],
[
2731,
2734
]
],
[
[
1719,
1730
],
[
1932,
1943
]
],
[
[
1763,
1774
],
[
1945,
1956
]
],
[
[
1908,
1912
],
[
2079,
2083
],
[
2224,
2228
]
],
[
[
2038,
2041
],
[
2353,
2356
]
],
[
[
2202,
2208
],
[
2340,
2346
]
],
[
[
2319,
2326
],
[
2452,
2459
]
],
[
[
2542,
2545
],
[
2570,
2573
],
[
2660,
2663
]
],
[
[
2694,
2702
],
[
2751,
2759
],
[
2840,
2848
]
]
] |
# -*- coding: utf-8 -*-
"""
Microsoft-Windows-Direct3D10_1
GUID : 9b7e4c8f-342c-4106-a19f-4f2704f689f0
"""
from construct import Int8sl, Int8ul, Int16ul, Int16sl, Int32sl, Int32ul, Int64sl, Int64ul, Bytes, Double, Float32l, Struct
from etl.utils import WString, CString, SystemTime, Guid
from etl.dtyp import Sid
from etl.parsers.etw.core import Etw, declare, guid
@declare(guid=guid("9b7e4c8f-342c-4106-a19f-4f2704f689f0"), event_id=1, version=0)
class Microsoft_Windows_Direct3D10_1_1_0(Etw):
pattern = Struct(
"pObject" / Int64ul,
"CchOldDebugObjectName" / Int32ul,
"OldDebugObjectName" / Bytes(lambda this: this.CchOldDebugObjectName),
"CchNewDebugObjectName" / Int32ul,
"NewDebugObjectName" / Bytes(lambda this: this.CchNewDebugObjectName)
)
@declare(guid=guid("9b7e4c8f-342c-4106-a19f-4f2704f689f0"), event_id=2, version=0)
class Microsoft_Windows_Direct3D10_1_2_0(Etw):
pattern = Struct(
"pObject" / Int64ul,
"CchDebugObjectName" / Int32ul,
"DebugObjectName" / Bytes(lambda this: this.CchDebugObjectName)
)
@declare(guid=guid("9b7e4c8f-342c-4106-a19f-4f2704f689f0"), event_id=3, version=0)
class Microsoft_Windows_Direct3D10_1_3_0(Etw):
pattern = Struct(
"pID3D10_1Device" / Int64ul,
"pIDXGIDevice" / Int64ul,
"pIDXGIAdapter" / Int64ul,
"CreationFlags" / Int32ul,
"FeatureLevel" / Int32ul,
"hKMAdapter" / Int32ul,
"hUMAdapter" / Int64ul,
"UMAdapterVersion" / Int64ul,
"hKMDevice" / Int32ul,
"hUMDevice" / Int64ul,
"UMDeviceVersion" / Int64ul,
"UMDeviceFlags" / Int32ul
)
@declare(guid=guid("9b7e4c8f-342c-4106-a19f-4f2704f689f0"), event_id=4, version=0)
class Microsoft_Windows_Direct3D10_1_4_0(Etw):
pattern = Struct(
"pID3D10_1Device" / Int64ul,
"pIDXGIDevice" / Int64ul,
"pIDXGIAdapter" / Int64ul,
"CreationFlags" / Int32ul,
"FeatureLevel" / Int32ul,
"hKMAdapter" / Int32ul,
"hUMAdapter" / Int64ul,
"UMAdapterVersion" / Int64ul,
"hKMDevice" / Int32ul,
"hUMDevice" / Int64ul,
"UMDeviceVersion" / Int64ul,
"UMDeviceFlags" / Int32ul
)
@declare(guid=guid("9b7e4c8f-342c-4106-a19f-4f2704f689f0"), event_id=5, version=0)
class Microsoft_Windows_Direct3D10_1_5_0(Etw):
pattern = Struct(
"pID3D10_1Device" / Int64ul,
"pIDXGIDevice" / Int64ul,
"pIDXGIAdapter" / Int64ul,
"CreationFlags" / Int32ul,
"FeatureLevel" / Int32ul,
"hKMAdapter" / Int32ul,
"hUMAdapter" / Int64ul,
"UMAdapterVersion" / Int64ul,
"hKMDevice" / Int32ul,
"hUMDevice" / Int64ul,
"UMDeviceVersion" / Int64ul,
"UMDeviceFlags" / Int32ul
)
@declare(guid=guid("9b7e4c8f-342c-4106-a19f-4f2704f689f0"), event_id=6, version=0)
class Microsoft_Windows_Direct3D10_1_6_0(Etw):
pattern = Struct(
"pID3D10Resource" / Int64ul,
"pIDXGISurface" / Int64ul,
"pID3D10_1Device" / Int64ul,
"Dimension" / Int32ul,
"Usage" / Int32ul,
"Width" / Int32ul,
"Height" / Int32ul,
"Depth" / Int32ul,
"MipLevels" / Int32ul,
"ArraySize" / Int32ul,
"Format" / Int32ul,
"SampleCount" / Int32ul,
"SampleQuality" / Int32ul,
"BindFlags" / Int32ul,
"CPUAccessFlags" / Int32ul,
"MiscFlags" / Int32ul,
"hKMResource" / Int32ul,
"hUMResource" / Int64ul,
"UMResourceMiscFlags" / Int32ul
)
@declare(guid=guid("9b7e4c8f-342c-4106-a19f-4f2704f689f0"), event_id=7, version=0)
class Microsoft_Windows_Direct3D10_1_7_0(Etw):
pattern = Struct(
"pID3D10Resource" / Int64ul,
"pIDXGISurface" / Int64ul,
"pID3D10_1Device" / Int64ul,
"Dimension" / Int32ul,
"Usage" / Int32ul,
"Width" / Int32ul,
"Height" / Int32ul,
"Depth" / Int32ul,
"MipLevels" / Int32ul,
"ArraySize" / Int32ul,
"Format" / Int32ul,
"SampleCount" / Int32ul,
"SampleQuality" / Int32ul,
"BindFlags" / Int32ul,
"CPUAccessFlags" / Int32ul,
"MiscFlags" / Int32ul,
"hKMResource" / Int32ul,
"hUMResource" / Int64ul,
"UMResourceMiscFlags" / Int32ul
)
@declare(guid=guid("9b7e4c8f-342c-4106-a19f-4f2704f689f0"), event_id=8, version=0)
class Microsoft_Windows_Direct3D10_1_8_0(Etw):
pattern = Struct(
"pID3D10Resource" / Int64ul,
"pIDXGISurface" / Int64ul,
"pID3D10_1Device" / Int64ul,
"Dimension" / Int32ul,
"Usage" / Int32ul,
"Width" / Int32ul,
"Height" / Int32ul,
"Depth" / Int32ul,
"MipLevels" / Int32ul,
"ArraySize" / Int32ul,
"Format" / Int32ul,
"SampleCount" / Int32ul,
"SampleQuality" / Int32ul,
"BindFlags" / Int32ul,
"CPUAccessFlags" / Int32ul,
"MiscFlags" / Int32ul,
"hKMResource" / Int32ul,
"hUMResource" / Int64ul,
"UMResourceMiscFlags" / Int32ul
)
@declare(guid=guid("9b7e4c8f-342c-4106-a19f-4f2704f689f0"), event_id=9, version=0)
class Microsoft_Windows_Direct3D10_1_9_0(Etw):
pattern = Struct(
"pID3D10Resource" / Int64ul,
"pIDXGISurface" / Int64ul,
"pID3D10_1Device" / Int64ul,
"Dimension" / Int32ul,
"Usage" / Int32ul,
"Width" / Int32ul,
"Height" / Int32ul,
"Depth" / Int32ul,
"MipLevels" / Int32ul,
"ArraySize" / Int32ul,
"Format" / Int32ul,
"SampleCount" / Int32ul,
"SampleQuality" / Int32ul,
"BindFlags" / Int32ul,
"CPUAccessFlags" / Int32ul,
"MiscFlags" / Int32ul,
"hKMResource" / Int32ul,
"hUMResource" / Int64ul,
"UMResourceMiscFlags" / Int32ul
)
@declare(guid=guid("9b7e4c8f-342c-4106-a19f-4f2704f689f0"), event_id=10, version=0)
class Microsoft_Windows_Direct3D10_1_10_0(Etw):
pattern = Struct(
"pID3D10Resource" / Int64ul,
"pIDXGISurface" / Int64ul,
"pID3D10_1Device" / Int64ul,
"Dimension" / Int32ul,
"Usage" / Int32ul,
"Width" / Int32ul,
"Height" / Int32ul,
"Depth" / Int32ul,
"MipLevels" / Int32ul,
"ArraySize" / Int32ul,
"Format" / Int32ul,
"SampleCount" / Int32ul,
"SampleQuality" / Int32ul,
"BindFlags" / Int32ul,
"CPUAccessFlags" / Int32ul,
"MiscFlags" / Int32ul,
"hKMResource" / Int32ul,
"hUMResource" / Int64ul,
"UMResourceMiscFlags" / Int32ul
)
@declare(guid=guid("9b7e4c8f-342c-4106-a19f-4f2704f689f0"), event_id=11, version=0)
class Microsoft_Windows_Direct3D10_1_11_0(Etw):
pattern = Struct(
"pID3D10Resource" / Int64ul,
"pIDXGISurface" / Int64ul,
"pID3D10_1Device" / Int64ul,
"Dimension" / Int32ul,
"Usage" / Int32ul,
"Width" / Int32ul,
"Height" / Int32ul,
"Depth" / Int32ul,
"MipLevels" / Int32ul,
"ArraySize" / Int32ul,
"Format" / Int32ul,
"SampleCount" / Int32ul,
"SampleQuality" / Int32ul,
"BindFlags" / Int32ul,
"CPUAccessFlags" / Int32ul,
"MiscFlags" / Int32ul,
"hKMResource" / Int32ul,
"hUMResource" / Int64ul,
"UMResourceMiscFlags" / Int32ul
)
@declare(guid=guid("9b7e4c8f-342c-4106-a19f-4f2704f689f0"), event_id=12, version=0)
class Microsoft_Windows_Direct3D10_1_12_0(Etw):
pattern = Struct(
"pID3D10Resource" / Int64ul,
"pIDXGISurface" / Int64ul,
"pID3D10_1Device" / Int64ul,
"Dimension" / Int32ul,
"Usage" / Int32ul,
"Width" / Int32ul,
"Height" / Int32ul,
"Depth" / Int32ul,
"MipLevels" / Int32ul,
"ArraySize" / Int32ul,
"Format" / Int32ul,
"SampleCount" / Int32ul,
"SampleQuality" / Int32ul,
"BindFlags" / Int32ul,
"CPUAccessFlags" / Int32ul,
"MiscFlags" / Int32ul,
"hKMResource" / Int32ul,
"hUMResource" / Int64ul,
"UMResourceMiscFlags" / Int32ul
)
@declare(guid=guid("9b7e4c8f-342c-4106-a19f-4f2704f689f0"), event_id=13, version=0)
class Microsoft_Windows_Direct3D10_1_13_0(Etw):
pattern = Struct(
"pID3D10Resource" / Int64ul,
"pIDXGISurface" / Int64ul,
"pID3D10_1Device" / Int64ul,
"Dimension" / Int32ul,
"Usage" / Int32ul,
"Width" / Int32ul,
"Height" / Int32ul,
"Depth" / Int32ul,
"MipLevels" / Int32ul,
"ArraySize" / Int32ul,
"Format" / Int32ul,
"SampleCount" / Int32ul,
"SampleQuality" / Int32ul,
"BindFlags" / Int32ul,
"CPUAccessFlags" / Int32ul,
"MiscFlags" / Int32ul,
"hKMResource" / Int32ul,
"hUMResource" / Int64ul,
"UMResourceMiscFlags" / Int32ul
)
@declare(guid=guid("9b7e4c8f-342c-4106-a19f-4f2704f689f0"), event_id=14, version=0)
class Microsoft_Windows_Direct3D10_1_14_0(Etw):
pattern = Struct(
"pID3D10Resource" / Int64ul,
"pIDXGISurface" / Int64ul,
"pID3D10_1Device" / Int64ul,
"Dimension" / Int32ul,
"Usage" / Int32ul,
"Width" / Int32ul,
"Height" / Int32ul,
"Depth" / Int32ul,
"MipLevels" / Int32ul,
"ArraySize" / Int32ul,
"Format" / Int32ul,
"SampleCount" / Int32ul,
"SampleQuality" / Int32ul,
"BindFlags" / Int32ul,
"CPUAccessFlags" / Int32ul,
"MiscFlags" / Int32ul,
"hKMResource" / Int32ul,
"hUMResource" / Int64ul,
"UMResourceMiscFlags" / Int32ul
)
@declare(guid=guid("9b7e4c8f-342c-4106-a19f-4f2704f689f0"), event_id=15, version=0)
class Microsoft_Windows_Direct3D10_1_15_0(Etw):
pattern = Struct(
"pID3D10Resource" / Int64ul,
"pIDXGISurface" / Int64ul,
"pID3D10_1Device" / Int64ul,
"Dimension" / Int32ul,
"Usage" / Int32ul,
"Width" / Int32ul,
"Height" / Int32ul,
"Depth" / Int32ul,
"MipLevels" / Int32ul,
"ArraySize" / Int32ul,
"Format" / Int32ul,
"SampleCount" / Int32ul,
"SampleQuality" / Int32ul,
"BindFlags" / Int32ul,
"CPUAccessFlags" / Int32ul,
"MiscFlags" / Int32ul,
"hKMResource" / Int32ul,
"hUMResource" / Int64ul,
"UMResourceMiscFlags" / Int32ul
)
@declare(guid=guid("9b7e4c8f-342c-4106-a19f-4f2704f689f0"), event_id=16, version=0)
class Microsoft_Windows_Direct3D10_1_16_0(Etw):
pattern = Struct(
"pID3D10Resource" / Int64ul,
"pIDXGISurface" / Int64ul,
"pID3D10_1Device" / Int64ul,
"Dimension" / Int32ul,
"Usage" / Int32ul,
"Width" / Int32ul,
"Height" / Int32ul,
"Depth" / Int32ul,
"MipLevels" / Int32ul,
"ArraySize" / Int32ul,
"Format" / Int32ul,
"SampleCount" / Int32ul,
"SampleQuality" / Int32ul,
"BindFlags" / Int32ul,
"CPUAccessFlags" / Int32ul,
"MiscFlags" / Int32ul,
"hKMResource" / Int32ul,
"hUMResource" / Int64ul,
"UMResourceMiscFlags" / Int32ul
)
@declare(guid=guid("9b7e4c8f-342c-4106-a19f-4f2704f689f0"), event_id=17, version=0)
class Microsoft_Windows_Direct3D10_1_17_0(Etw):
pattern = Struct(
"pID3D10Resource" / Int64ul,
"pIDXGISurface" / Int64ul,
"pID3D10_1Device" / Int64ul,
"Dimension" / Int32ul,
"Usage" / Int32ul,
"Width" / Int32ul,
"Height" / Int32ul,
"Depth" / Int32ul,
"MipLevels" / Int32ul,
"ArraySize" / Int32ul,
"Format" / Int32ul,
"SampleCount" / Int32ul,
"SampleQuality" / Int32ul,
"BindFlags" / Int32ul,
"CPUAccessFlags" / Int32ul,
"MiscFlags" / Int32ul,
"hKMResource" / Int32ul,
"hUMResource" / Int64ul,
"UMResourceMiscFlags" / Int32ul
)
@declare(guid=guid("9b7e4c8f-342c-4106-a19f-4f2704f689f0"), event_id=18, version=0)
class Microsoft_Windows_Direct3D10_1_18_0(Etw):
pattern = Struct(
"Resources" / Int32ul,
"pIDXGISurfaces" / Int64ul,
"hNewKMResources" / Int32ul
)
| [
[
[
129,
135
]
],
[
[
137,
143
]
],
[
[
145,
152
]
],
[
[
154,
161
]
],
[
[
163,
170
]
],
[
[
172,
179
],
[
582,
589
],
[
704,
711
],
[
1011,
1018
],
[
1384,
1391
],
[
1418,
1425
],
[
1450,
1457
],
[
1551,
1558
],
[
1654,
1661
],
[
1954,
1961
],
[
1988,
1995
],
[
2020,
2027
],
[
2121,
2128
],
[
2224,
2231
],
[
2524,
2531
],
[
2558,
2565
],
[
2590,
2597
],
[
2691,
2698
],
[
2794,
2801
],
[
3093,
3100
],
[
3120,
3127
],
[
3147,
3154
],
[
3175,
3182
],
[
3202,
3209
],
[
3233,
3240
],
[
3264,
3271
],
[
3292,
3299
],
[
3325,
3332
],
[
3360,
3367
],
[
3391,
3398
],
[
3427,
3434
],
[
3458,
3465
],
[
3491,
3498
],
[
3565,
3572
],
[
3864,
3871
],
[
3891,
3898
],
[
3918,
3925
],
[
3946,
3953
],
[
3973,
3980
],
[
4004,
4011
],
[
4035,
4042
],
[
4063,
4070
],
[
4096,
4103
],
[
4131,
4138
],
[
4162,
4169
],
[
4198,
4205
],
[
4229,
4236
],
[
4262,
4269
],
[
4336,
4343
],
[
4635,
4642
],
[
4662,
4669
],
[
4689,
4696
],
[
4717,
4724
],
[
4744,
4751
],
[
4775,
4782
],
[
4806,
4813
],
[
4834,
4841
],
[
4867,
4874
],
[
4902,
4909
],
[
4933,
4940
],
[
4969,
4976
],
[
5000,
5007
],
[
5033,
5040
],
[
5107,
5114
],
[
5406,
5413
],
[
5433,
5440
],
[
5460,
5467
],
[
5488,
5495
],
[
5515,
5522
],
[
5546,
5553
],
[
5577,
5584
],
[
5605,
5612
],
[
5638,
5645
],
[
5673,
5680
],
[
5704,
5711
],
[
5740,
5747
],
[
5771,
5778
],
[
5804,
5811
],
[
5878,
5885
],
[
6179,
6186
],
[
6206,
6213
],
[
6233,
6240
],
[
6261,
6268
],
[
6288,
6295
],
[
6319,
6326
],
[
6350,
6357
],
[
6378,
6385
],
[
6411,
6418
],
[
6446,
6453
],
[
6477,
6484
],
[
6513,
6520
],
[
6544,
6551
],
[
6577,
6584
],
[
6651,
6658
],
[
6952,
6959
],
[
6979,
6986
],
[
7006,
7013
],
[
7034,
7041
],
[
7061,
7068
],
[
7092,
7099
],
[
7123,
7130
],
[
7151,
7158
],
[
7184,
7191
],
[
7219,
7226
],
[
7250,
7257
],
[
7286,
7293
],
[
7317,
7324
],
[
7350,
7357
],
[
7424,
7431
],
[
7725,
7732
],
[
7752,
7759
],
[
7779,
7786
],
[
7807,
7814
],
[
7834,
7841
],
[
7865,
7872
],
[
7896,
7903
],
[
7924,
7931
],
[
7957,
7964
],
[
7992,
7999
],
[
8023,
8030
],
[
8059,
8066
],
[
8090,
8097
],
[
8123,
8130
],
[
8197,
8204
],
[
8498,
8505
],
[
8525,
8532
],
[
8552,
8559
],
[
8580,
8587
],
[
8607,
8614
],
[
8638,
8645
],
[
8669,
8676
],
[
8697,
8704
],
[
8730,
8737
],
[
8765,
8772
],
[
8796,
8803
],
[
8832,
8839
],
[
8863,
8870
],
[
8896,
8903
],
[
8970,
8977
],
[
9271,
9278
],
[
9298,
9305
],
[
9325,
9332
],
[
9353,
9360
],
[
9380,
9387
],
[
9411,
9418
],
[
9442,
9449
],
[
9470,
9477
],
[
9503,
9510
],
[
9538,
9545
],
[
9569,
9576
],
[
9605,
9612
],
[
9636,
9643
],
[
9669,
9676
],
[
9743,
9750
],
[
10044,
10051
],
[
10071,
10078
],
[
10098,
10105
],
[
10126,
10133
],
[
10153,
10160
],
[
10184,
10191
],
[
10215,
10222
],
[
10243,
10250
],
[
10276,
10283
],
[
10311,
10318
],
[
10342,
10349
],
[
10378,
10385
],
[
10409,
10416
],
[
10442,
10449
],
[
10516,
10523
],
[
10817,
10824
],
[
10844,
10851
],
[
10871,
10878
],
[
10899,
10906
],
[
10926,
10933
],
[
10957,
10964
],
[
10988,
10995
],
[
11016,
11023
],
[
11049,
11056
],
[
11084,
11091
],
[
11115,
11122
],
[
11151,
11158
],
[
11182,
11189
],
[
11215,
11222
],
[
11289,
11296
],
[
11590,
11597
],
[
11617,
11624
],
[
11644,
11651
],
[
11672,
11679
],
[
11699,
11706
],
[
11730,
11737
],
[
11761,
11768
],
[
11789,
11796
],
[
11822,
11829
],
[
11857,
11864
],
[
11888,
11895
],
[
11924,
11931
],
[
11955,
11962
],
[
11988,
11995
],
[
12062,
12069
],
[
12254,
12261
],
[
12327,
12334
]
],
[
[
181,
188
]
],
[
[
190,
197
],
[
539,
546
],
[
971,
978
],
[
1280,
1287
],
[
1314,
1321
],
[
1349,
1356
],
[
1482,
1489
],
[
1520,
1527
],
[
1582,
1589
],
[
1619,
1626
],
[
1850,
1857
],
[
1884,
1891
],
[
1919,
1926
],
[
2052,
2059
],
[
2090,
2097
],
[
2152,
2159
],
[
2189,
2196
],
[
2420,
2427
],
[
2454,
2461
],
[
2489,
2496
],
[
2622,
2629
],
[
2660,
2667
],
[
2722,
2729
],
[
2759,
2766
],
[
2990,
2997
],
[
3025,
3032
],
[
3062,
3069
],
[
3524,
3531
],
[
3761,
3768
],
[
3796,
3803
],
[
3833,
3840
],
[
4295,
4302
],
[
4532,
4539
],
[
4567,
4574
],
[
4604,
4611
],
[
5066,
5073
],
[
5303,
5310
],
[
5338,
5345
],
[
5375,
5382
],
[
5837,
5844
],
[
6076,
6083
],
[
6111,
6118
],
[
6148,
6155
],
[
6610,
6617
],
[
6849,
6856
],
[
6884,
6891
],
[
6921,
6928
],
[
7383,
7390
],
[
7622,
7629
],
[
7657,
7664
],
[
7694,
7701
],
[
8156,
8163
],
[
8395,
8402
],
[
8430,
8437
],
[
8467,
8474
],
[
8929,
8936
],
[
9168,
9175
],
[
9203,
9210
],
[
9240,
9247
],
[
9702,
9709
],
[
9941,
9948
],
[
9976,
9983
],
[
10013,
10020
],
[
10475,
10482
],
[
10714,
10721
],
[
10749,
10756
],
[
10786,
10793
],
[
11248,
11255
],
[
11487,
11494
],
[
11522,
11529
],
[
11559,
11566
],
[
12021,
12028
],
[
12290,
12297
]
],
[
[
199,
204
],
[
622,
627
],
[
744,
749
],
[
1048,
1053
]
],
[
[
206,
212
]
],
[
[
214,
222
]
],
[
[
224,
230
],
[
511,
517
],
[
943,
949
],
[
1244,
1250
],
[
1814,
1820
],
[
2384,
2390
],
[
2954,
2960
],
[
3725,
3731
],
[
4496,
4502
],
[
5267,
5273
],
[
6040,
6046
],
[
6813,
6819
],
[
7586,
7592
],
[
8359,
8365
],
[
9132,
9138
],
[
9905,
9911
],
[
10678,
10684
],
[
11451,
11457
],
[
12224,
12230
]
],
[
[
253,
260
]
],
[
[
262,
269
]
],
[
[
271,
281
]
],
[
[
283,
287
]
],
[
[
309,
312
]
],
[
[
346,
349
],
[
491,
494
],
[
923,
926
],
[
1224,
1227
],
[
1794,
1797
],
[
2364,
2367
],
[
2934,
2937
],
[
3705,
3708
],
[
4476,
4479
],
[
5247,
5250
],
[
6020,
6023
],
[
6793,
6796
],
[
7566,
7569
],
[
8339,
8342
],
[
9112,
9115
],
[
9885,
9888
],
[
10658,
10661
],
[
11431,
11434
],
[
12204,
12207
]
],
[
[
351,
358
],
[
368,
375
],
[
800,
807
],
[
1101,
1108
],
[
1671,
1678
],
[
2241,
2248
],
[
2811,
2818
],
[
3582,
3589
],
[
4353,
4360
],
[
5124,
5131
],
[
5895,
5902
],
[
6668,
6675
],
[
7441,
7448
],
[
8214,
8221
],
[
8987,
8994
],
[
9760,
9767
],
[
10533,
10540
],
[
11306,
11313
],
[
12079,
12086
]
],
[
[
360,
364
],
[
381,
385
],
[
813,
817
],
[
1114,
1118
],
[
1684,
1688
],
[
2254,
2258
],
[
2824,
2828
],
[
3595,
3599
],
[
4366,
4370
],
[
5137,
5141
],
[
5908,
5912
],
[
6681,
6685
],
[
7454,
7458
],
[
8227,
8231
],
[
9000,
9004
],
[
9773,
9777
],
[
10546,
10550
],
[
11319,
11323
],
[
12092,
12096
]
],
[
[
456,
490
]
],
[
[
888,
922
]
],
[
[
1189,
1223
]
],
[
[
1759,
1793
]
],
[
[
2329,
2363
]
],
[
[
2899,
2933
]
],
[
[
3670,
3704
]
],
[
[
4441,
4475
]
],
[
[
5212,
5246
]
],
[
[
5984,
6019
]
],
[
[
6757,
6792
]
],
[
[
7530,
7565
]
],
[
[
8303,
8338
]
],
[
[
9076,
9111
]
],
[
[
9849,
9884
]
],
[
[
10622,
10657
]
],
[
[
11395,
11430
]
],
[
[
12168,
12203
]
]
] |
# Generated by Django 2.0.4 on 2018-04-24 01:54
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('blog', '0001_initial'),
]
operations = [
migrations.RenameField(
model_name='post',
old_name='sub_Title',
new_name='sub_title',
),
migrations.RemoveField(
model_name='comment',
name='vote',
),
migrations.AddField(
model_name='comment',
name='text',
field=models.CharField(default=1, max_length=150),
preserve_default=False,
),
]
| [
[
[
71,
81
],
[
108,
118
],
[
221,
231
],
[
363,
373
],
[
465,
475
]
],
[
[
83,
89
],
[
563,
569
]
],
[
[
98,
107
]
]
] |
# -*- coding: utf-8 -*-
from __future__ import absolute_import
import mock
from exam import fixture
from sentry.interfaces.template import Template
from sentry.models import Event
from sentry.testutils import TestCase
class TemplateTest(TestCase):
@fixture
def interface(self):
return Template.to_python(dict(
filename='foo.html',
context_line='hello world',
lineno=1,
))
def test_serialize(self):
result = self.interface.to_json()
self.assertEquals(result['filename'], 'foo.html')
self.assertEquals(result['context_line'], 'hello world')
self.assertEquals(result['lineno'], 1)
def test_get_hash(self):
result = self.interface.get_hash()
self.assertEquals(result, ['foo.html', 'hello world'])
@mock.patch('sentry.interfaces.template.get_context')
@mock.patch('sentry.interfaces.template.Template.get_traceback')
def test_to_string_returns_traceback(self, get_traceback, get_context):
get_traceback.return_value = 'traceback'
event = mock.Mock(spec=Event)
result = self.interface.to_string(event)
get_traceback.assert_called_once_with(event, get_context.return_value)
self.assertEquals(result, 'Stacktrace (most recent call last):\n\ntraceback')
def test_serialize_unserialize_behavior(self):
result = type(self.interface).to_python(self.interface.to_json())
assert result.to_json() == self.interface.to_json()
| [
[
[
48,
63
]
],
[
[
72,
76
],
[
823,
827
],
[
881,
885
],
[
1086,
1090
]
],
[
[
95,
102
],
[
259,
266
]
],
[
[
143,
151
],
[
307,
315
]
],
[
[
178,
183
],
[
1101,
1106
]
],
[
[
213,
221
],
[
243,
251
]
],
[
[
230,
242
]
]
] |
from django.shortcuts import render
def contrib_file(request):
return render(request, "dpaste/contribute.html")
| [
[
[
29,
35
],
[
76,
82
]
],
[
[
42,
54
]
]
] |
from tkinter import messagebox
from ClientInsert import *
class ClientEdit(ClientInsert):
def __init__(self, db, id_cliente, master):
super().__init__(db, master)
self.title('Editar Cliente')
self.__id_cliente = id_cliente
self.__list = master
table_cliente = db.select("CLIENTE", ["*"],
['id_cliente'], [str(id_cliente)])[0]
table_municipio = db.select("MUNICIPIO",
["id_uf_municipio", "nome_municipio"],
["id_municipio"],
[str(table_cliente["id_municipio_cliente"])])[0]
table_uf = db.select("UF",
["nome_uf"],
["id_uf"],
[str(table_municipio["id_uf_municipio"])])[0]
table_telefone = db.select("TELEFONE",
["numero_telefone", "ddd_telefone"],
["id_cliente_telefone"],
[str(id_cliente)])
telefones = ""
for telefone in table_telefone:
if (telefone['ddd_telefone'] != 0 and
telefone['numero_telefone'] != 0):
telefones += str(telefone['ddd_telefone'])
telefones += str(telefone['numero_telefone'])
self._ClientForm__str_rsocial.set(
table_cliente['rsocial_cliente'])
self._ClientForm__str_nfantasia.set(
table_cliente['nfantasia_cliente'])
self._ClientForm__tracer_cnpj.set(
str(table_cliente['cnpj_cliente']))
self._ClientForm__tracer_iestadual.set(
str(table_cliente['iestadual_cliente']))
self._ClientForm__tracer_imunicipal.set(
str(table_cliente['imunicipal_cliente']))
self._ClientForm__str_logradouro.set(
table_cliente['logradouro_cliente'])
self._ClientForm__str_complemento.set(
table_cliente['complemento_cliente'])
self._ClientForm__tracer_cep.set(
str(table_cliente['cep_cliente']))
self._ClientForm__tracer_telefone.set(
telefones)
celular = str(table_cliente['ddd_cel_cliente'])
celular += str(table_cliente['ncel_cliente'])
self._ClientForm__tracer_ncel.set(
celular)
self._ClientForm__str_bairro.set(
table_cliente['bairro_cliente'])
self._ClientForm__str_email.set(
table_cliente['email_cliente'])
self._ClientForm__str_url.set(
table_cliente['url_cliente'])
self._ClientForm__str_municipio.set(table_municipio["nome_municipio"])
self._ClientForm__str_uf.set(table_uf["nome_uf"])
self._ClientForm__int_whatsapp.set(table_cliente["whatsapp_cliente"])
self._ClientForm__button_salvar.config(
command=self.__button_salvar_action)
for i in range(0, len(self._ClientInsert__list_ufs)):
if self._ClientInsert__list_ufs[i] == table_uf['nome_uf']:
self._ClientForm__combo_uf.current(i)
for i in range(0, len(self._ClientInsert__list_municipios)):
if self._ClientInsert__list_municipios[i] == table_municipio['nome_municipio']:
self._ClientForm__combo_municipio.current(i)
def __button_salvar_action(self):
data = self._ClientInsert__data_validation()
if data == None:
return
else:
self.__database_update(data)
def __database_update(self, data):
rsocial = data[0]
nfantasia = data[1]
cnpj = data[2]
iestadual = data[3]
imunicipal = data[4]
logradouro = data[5]
complemento = data[6]
bairro = data[7]
municipio = data[8]
uf = data[9]
cep = data[10]
telefone = data[11]
ncel = data[12]
whatsapp = data[13]
email = data[14]
url = data[15]
uf_id = str(self._ClientInsert__db.select("UF",
['id_uf'], ['nome_uf'], [uf])[0]['id_uf'])
municipio_id = self._ClientInsert__db.select("MUNICIPIO",
['id_municipio'],
['nome_municipio', 'id_uf_municipio'],
[municipio, uf_id])
if len(municipio_id) == 0:
self._ClientInsert__db.insert("MUNICIPIO",
['nome_municipio', 'id_uf_municipio'],
[municipio, uf_id])
municipio_id = str(
self._ClientInsert__db.last_insert_id()[0]['LAST_INSERT_ID()'])
else:
municipio_id = str(municipio_id[0]['id_municipio'])
self._ClientInsert__db.update("CLIENTE",
['bairro_cliente',
'cep_cliente',
'rsocial_cliente',
'ncel_cliente',
'ddd_cel_cliente',
'nfantasia_cliente',
'whatsapp_cliente',
'cnpj_cliente',
'iestadual_cliente',
'imunicipal_cliente',
'logradouro_cliente',
'email_cliente',
'complemento_cliente',
'url_cliente',
'id_municipio_cliente'],
[bairro,
cep,
rsocial,
ncel[2:],
ncel[:2],
nfantasia,
whatsapp,
cnpj,
iestadual,
imunicipal,
logradouro,
email,
complemento,
url,
municipio_id],
['id_cliente'],
[str(self.__id_cliente)])
table_telefone_id = self._ClientInsert__db.select("TELEFONE",
['id_telefone'],
['id_cliente_telefone'],
[str(self.__id_cliente)])
self._ClientInsert__db.update("TELEFONE",
['ddd_telefone',
'numero_telefone'],
[self._ClientInsert__ddd_telefone,
self._ClientInsert__number_telefone],
['id_telefone'],
[str(table_telefone_id[0]['id_telefone'])])
self._ClientInsert__db.update("TELEFONE",
['ddd_telefone',
'numero_telefone'],
[self._ClientInsert__ddd_telefone2,
self._ClientInsert__number_telefone2],
['id_telefone'],
[str(table_telefone_id[1]['id_telefone'])])
self._ClientInsert__db.update("TELEFONE",
['ddd_telefone',
'numero_telefone'],
[self._ClientInsert__ddd_telefone3,
self._ClientInsert__number_telefone3],
['id_telefone'],
[str(table_telefone_id[2]['id_telefone'])])
messagebox.showinfo("Informação", "Dados alterados!", parent=self)
self.destroy()
self.__list.filter_client()
| [
[
[
20,
30
],
[
6917,
6927
]
],
[
[
57,
58
],
[
77,
89
]
],
[
[
66,
76
]
]
] |
template = """<!DOCTYPE html>
<html>
<head>
<meta charset="UTF-8">
<title>Title of the document</title>
<script type="text/javascript" src="https://s3.tradingview.com/tv.js"></script>
<link rel="stylesheet" href="https://cdnjs.cloudflare.com/ajax/libs/milligram/1.3.0/milligram.min.css">
<style>
.tradingview-widget-container {{
position: sticky;
top: 20px;
}}
.stocks-view {{
display: flex;
flex-wrap: nowrap;
}}
.stocks-listing {{
width: 780px;
flex-wrap: nowrap;
padding: 20px;
}}
.stocks-graph {{
flex-wrap: nowrap;
padding: 20px;
}}
th.sticky-header {{
position: sticky;
top: 0;
z-index: 10;
background-color: white;
}}
.positive-movement {{
color: green;
font-weight: bold;
}}
.negative-movement {{
color: red;
font-weight: bold;
}}
.blue-category {{
background-color: lightsteelblue;
}}
</style>
</head>
<body>
{}
<div class="stocks-view">
<div class="stocks-listing">
<table>
<thead>
<tr>
<th class="sticky-header">Symbol</th>
<th class="sticky-header">April 1 2019</th>
<th class="sticky-header">Dec 2 2019</th>
<th class="sticky-header">Today</th>
<th class="sticky-header">Movement since April 1 2019</th>
<th class="sticky-header">Movement since Dec 2 2019</th>
<th class="sticky-header">Bankruptcy probability</th>
</tr>
</thead>
<tbody>
{}
</tbody>
</table>
</div>
<div class="stocks-graph"
<!-- TradingView Widget BEGIN -->
<div class="tradingview-widget-container">
<div id="tradingview_63a66"></div>
<div class="tradingview-widget-copyright"><a href="https://www.tradingview.com/symbols/AAPL/" rel="noopener" target="_blank"><span class="blue-text">AAPL Chart</span></a> by TradingView</div>
</div>
<!-- TradingView Widget END -->
</div>
</div>
<script type="text/javascript">
function renderChart(symbol) {{
new TradingView.widget(
{{
"width": 750,
"height": 500,
"symbol": symbol,
"interval": "180",
"timezone": "Etc/UTC",
"theme": "light",
"style": "1",
"locale": "en",
"toolbar_bg": "#f1f3f6",
"enable_publishing": false,
"allow_symbol_change": true,
"container_id": "tradingview_63a66"
}}
);
}}
document.addEventListener('DOMContentLoaded', function(){{
renderChart('BA');
}}, false);
</script>
</body>
</html>"""
| [
[
[
0,
8
]
]
] |