code: string (lengths 10 to 805k)
def_use_chains: sequence (lengths 0 to 667)
""" MD Analysis =========== #. :class:`.MDAnalysis` Class for converting a molecule to and back from an MDAnalysis object. """ import logging from ...utilities import WrapperNotInstalledException try: import MDAnalysis as mda except ModuleNotFoundError: mda = None logger = logging.getLogger(__name__) class MDAnalysis: """ Converter for :class:`stk.Molecule` to and from MDAnalysis. Examples -------- An stk molecule can be converted into an MDAnalysis Universe. .. code-block:: python import stk import stko stkmol = stk.BuildingBlock('NCCNCCN').with_centroid( position=np.array((10, 10, 10)) ) universe = stko.MDAnalysis().get_universe(stkmol) print('R_g:', universe.atoms.radius_of_gyration()) print('B_sphere:', universe.atoms.bsphere()) print('Universe COM:', universe.atoms.center_of_mass()) print('stk centroid:', stkmol.get_centroid()) """ def __init__(self): if mda is None: raise WrapperNotInstalledException( 'MDAnalysis is not installed; see README for ' 'installation.' ) def get_universe(self, mol): """ Get an MDAnalysis object. Parameters ---------- mol : :class:`stk.Molecule` Molecule to convert. Returns ------- :class:`MDAnalysis.Universe` The MDAnalysis Universe of the molecule. """ rdkit_mol = mol.to_rdkit_mol() return mda.Universe(rdkit_mol)
[ [ [ 138, 145 ], [ 289, 296 ] ], [ [ 172, 200 ], [ 1052, 1080 ] ], [ [ 218, 235 ], [ 1021, 1024 ], [ 1570, 1573 ] ], [ [ 268, 271 ], [ 1021, 1024 ], [ 1570, 1573 ] ], [ [ 280, 286 ] ], [ [ 325, 335 ] ] ]
def fact (n):
    if n == 1:
        return 1
    else:
        return n*fact(n-1)

print(fact(5))
[ [ [ 4, 8 ], [ 90, 94 ], [ 73, 77 ] ] ]
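Each entry in def_use_chains appears to be a list of [start, end) character spans into the matching code string, with the defining occurrence first and its uses after it; in the row above, [4, 8] is the definition of fact and [90, 94] and [73, 77] are its two uses. A minimal sketch of resolving the spans back to names, under that first-span-is-the-definition reading:

# Resolve def-use chain spans against the `code` string of the row above.
# Assumption: spans are end-exclusive character offsets, the first span in
# each chain is the definition, and the remaining spans are its uses.
code = (
    "def fact (n):\n"
    "    if n == 1:\n"
    "        return 1\n"
    "    else:\n"
    "        return n*fact(n-1)\n"
    "\n"
    "print(fact(5))\n"
)
def_use_chains = [[[4, 8], [90, 94], [73, 77]]]

for chain in def_use_chains:
    (d_start, d_end), *uses = chain
    print(f"definition {code[d_start:d_end]!r} at [{d_start}, {d_end})")
    for u_start, u_end in uses:
        print(f"  use {code[u_start:u_end]!r} at [{u_start}, {u_end})")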
import torch

from torch.optim import Optimizer


class OptimWrapper(Optimizer):
    # Mixin class that defines convenient functions for writing Optimizer Wrappers
    def __init__(self, optim):
        self.optim = optim

    def __getstate__(self):
        return self.optim.__getstate__()

    def __setstate__(self, state):
        self.optim.__setstate__(state)

    @property
    def state(self):
        return self.optim.state

    @property
    def param_groups(self):
        return self.optim.param_groups

    @param_groups.setter
    def param_groups(self, value):
        self.optim.param_groups = value

    def state_dict(self):
        return self.optim.state_dict()

    def load_state_dict(self, state_dict):
        self.optim.load_state_dict(state_dict)

    def zero_grad(self):
        self.optim.zero_grad()

    def add_param_group(self, param_group):
        self.optim.add_param_group(param_group)

    @property
    def defaults(self):
        return self.optim.defaults

    @defaults.setter
    def defaults(self, defaults):
        self.optim.defaults = defaults

    @torch.no_grad()
    def step(self, closure=None):
        self.optim.step(closure=closure)

    def __repr__(self):
        return "%s(%r)" % (self.__class__.__name__, self.optim)
[ [ [ 7, 12 ], [ 1148, 1153 ] ], [ [ 38, 47 ], [ 70, 79 ] ], [ [ 57, 69 ] ] ]
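OptimWrapper above is a pass-through: every attribute and method is delegated to the wrapped optimizer, so subclasses only override what they need. A small hypothetical check (not part of the row above; names are illustrative) showing the delegation:

# Hypothetical usage of the OptimWrapper mixin above: wrap a stock SGD
# optimizer and confirm that attribute access is forwarded to it.
import torch

param = torch.nn.Parameter(torch.zeros(3))
inner = torch.optim.SGD([param], lr=0.1)
wrapped = OptimWrapper(inner)

print(wrapped.param_groups is inner.param_groups)  # True - property delegates
print(wrapped.defaults is inner.defaults)          # True - property delegates
print(wrapped)                                      # OptimWrapper(SGD (...))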
import os
from pytest import fixture


@fixture(scope='function')
def environ(request):
    origin = dict(os.environ)

    @request.addfinalizer
    def restore_environ():
        os.environ.clear()
        os.environ.update(origin)

    return os.environ
[ [ [ 7, 9 ], [ 107, 109 ], [ 246, 248 ], [ 181, 183 ], [ 208, 210 ] ], [ [ 30, 37 ], [ 41, 48 ] ], [ [ 71, 78 ] ] ]
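The fixture above hands the test a live os.environ and registers a finalizer that restores the original contents afterwards, so a test can mutate the environment freely. A hypothetical test using it (function and variable names are illustrative):

# Hypothetical test built on the environ fixture above: changes made through
# the fixture are undone by the finalizer once the test finishes.
import os

def test_sets_temporary_variable(environ):
    environ['MY_TEMP_VAR'] = 'on'
    assert os.environ['MY_TEMP_VAR'] == 'on'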
#!/usr/bin/python3
'''
  (C) Copyright 2018-2021 Intel Corporation.

  SPDX-License-Identifier: BSD-2-Clause-Patent
'''
from apricot import TestWithServers
from test_utils_pool import TestPool


class DestroyRebuild(TestWithServers):
    """Test class for pool destroy tests.

    Test Class Description:
        This test verifies destruction of a pool that is rebuilding.

    :avocado: recursive
    """

    # also remove the commented line from the yaml file for rank 0
    CANCEL_FOR_TICKET = [["DAOS-4891", "rank_to_kill", "[0]"]]

    def test_destroy_while_rebuilding(self):
        """Jira ID: DAOS-xxxx.

        Test Description:
            Create a pool across multiple servers. After excluding one of the
            servers, verify that the pool can be destroyed during rebuild.

        Use Cases:
            Verifying that a pool can be destroyed during rebuild.

        :avocado: tags=all,daily_regression,medium
        :avocado: tags=pool,destroypoolrebuild
        """
        # Get the test parameters
        self.pool = TestPool(self.context, self.get_dmg_command())
        self.pool.get_params(self)
        targets = self.params.get("targets", "/run/server_config/servers/*")
        ranks = self.params.get("rank_to_kill", "/run/testparams/*")

        # Create a pool
        self.pool.create()

        # Verify the pool information before starting rebuild
        checks = {
            "pi_nnodes": len(self.hostlist_servers),
            "pi_ntargets": len(self.hostlist_servers) * targets,
            "pi_ndisabled": 0,
        }
        self.assertTrue(
            self.pool.check_pool_info(**checks),
            "Invalid pool information detected prior to rebuild")

        # Start rebuild
        self.server_managers[0].stop_ranks(ranks, self.d_log, force=True)
        self.pool.wait_for_rebuild(True)

        # Destroy the pool while rebuild is active
        self.pool.destroy()

        self.log.info("Test Passed")

        self.get_dmg_command().system_start(",".join(ranks))
        self.server_managers[0].update_expected_states(",".join(ranks), ["joined"])
[ [ [ 140, 155 ], [ 215, 230 ] ], [ [ 184, 192 ], [ 1042, 1050 ] ], [ [ 200, 214 ] ] ]
import os
import re
import socket

import ttfw_idf


@ttfw_idf.idf_example_test(env_tag='Example_WIFI_Protocols')
def test_examples_protocol_asio_udp_server(env, extra_data):
    """
    steps: |
      1. join AP
      2. Start server
      3. Test connects to server and sends a test message
      4. Test evaluates received test message from server
      5. Test evaluates received test message on server stdout
    """
    test_msg = b'echo message from client to server'
    dut1 = env.get_dut('udp_echo_server', 'examples/protocols/asio/udp_echo_server', dut_class=ttfw_idf.ESP32DUT)
    # check and log bin size
    binary_file = os.path.join(dut1.app.binary_path, 'asio_udp_echo_server.bin')
    bin_size = os.path.getsize(binary_file)
    ttfw_idf.log_performance('asio_udp_echo_server_bin_size', '{}KB'.format(bin_size // 1024))

    # 1. start test
    dut1.start_app()

    # 2. get the server IP address
    data = dut1.expect(re.compile(r' IPv4 address: ([0-9]+\.[0-9]+\.[0-9]+\.[0-9]+)'), timeout=30)

    # 3. create UDP client and connect to server
    cli = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    cli.settimeout(30)
    cli.connect((data[0], 2222))

    cli.send(test_msg)
    data = cli.recv(1024)

    # 4. check the message received back from the server
    if data == test_msg:
        print('PASS: Received correct message')
    else:
        print('Failure!')
        raise ValueError('Wrong data received from asio udp server: {} (expected:{})'.format(data, test_msg))

    # 5. check the client message appears also on server terminal
    dut1.expect(test_msg.decode())


if __name__ == '__main__':
    test_examples_protocol_asio_udp_server()
[ [ [ 7, 9 ], [ 636, 638 ], [ 714, 716 ] ], [ [ 17, 19 ], [ 937, 939 ] ], [ [ 27, 33 ], [ 1072, 1078 ], [ 1086, 1092 ], [ 1102, 1108 ] ], [ [ 42, 50 ], [ 54, 62 ], [ 570, 578 ], [ 747, 755 ] ], [ [ 118, 156 ], [ 1651, 1689 ] ] ]
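The test above drives an asio UDP echo server running on the device: whatever datagram the client sends must come back unchanged. A minimal local stand-in for that echo behaviour (plain Python, not part of ESP-IDF), handy for sanity-checking the client side without hardware:

# Minimal UDP echo loop mirroring what the device-side example is expected to
# do: receive one datagram and send it straight back to the sender.
import socket

srv = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
srv.bind(('127.0.0.1', 2222))
data, addr = srv.recvfrom(1024)
srv.sendto(data, addr)
srv.close()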
# Copyright 2020 - 2021 MONAI Consortium # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # http://www.apache.org/licenses/LICENSE-2.0 # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import copy import json import logging import os import shutil import tempfile import time import traceback from collections import OrderedDict from urllib.parse import quote_plus import ctk import qt import SampleData import SimpleITK as sitk import sitkUtils import slicer import vtk import vtkSegmentationCore from MONAILabelLib import GenericAnatomyColors, MONAILabelClient from slicer.ScriptedLoadableModule import * from slicer.util import VTKObservationMixin class MONAILabel(ScriptedLoadableModule): def __init__(self, parent): ScriptedLoadableModule.__init__(self, parent) self.parent.title = "MONAILabel" self.parent.categories = ["Active Learning"] self.parent.dependencies = [] self.parent.contributors = ["NVIDIA, KCL"] self.parent.helpText = """ Active Learning solution. See more information in <a href="https://github.com/Project-MONAI/MONAILabel">module documentation</a>. """ self.parent.acknowledgementText = """ Developed by NVIDIA, KCL """ # Additional initialization step after application startup is complete slicer.app.connect("startupCompleted()", self.initializeAfterStartup) def initializeAfterStartup(self): if not slicer.app.commandOptions().noMainWindow: self.settingsPanel = MONAILabelSettingsPanel() slicer.app.settingsDialog().addPanel("MONAI Label", self.settingsPanel) class _ui_MONAILabelSettingsPanel(object): def __init__(self, parent): vBoxLayout = qt.QVBoxLayout(parent) # settings groupBox = ctk.ctkCollapsibleGroupBox() groupBox.title = "MONAI Label Server" groupLayout = qt.QFormLayout(groupBox) serverUrl = qt.QLineEdit() groupLayout.addRow("Server address:", serverUrl) parent.registerProperty("MONAILabel/serverUrl", serverUrl, "text", str(qt.SIGNAL("textChanged(QString)"))) serverUrlHistory = qt.QLineEdit() groupLayout.addRow("Server address history:", serverUrlHistory) parent.registerProperty( "MONAILabel/serverUrlHistory", serverUrlHistory, "text", str(qt.SIGNAL("textChanged(QString)")) ) fileExtension = qt.QLineEdit() fileExtension.setText(".nii.gz") fileExtension.toolTip = "Default extension for uploading images/labels" groupLayout.addRow("File Extension:", fileExtension) parent.registerProperty( "MONAILabel/fileExtension", fileExtension, "text", str(qt.SIGNAL("textChanged(QString)")) ) clientId = qt.QLineEdit() clientId.setText("user-xyz") clientId.toolTip = "Client/User ID that will be sent to MONAI Label server for reference" groupLayout.addRow("Client/User-ID:", clientId) parent.registerProperty("MONAILabel/clientId", clientId, "text", str(qt.SIGNAL("textChanged(QString)"))) autoRunSegmentationCheckBox = qt.QCheckBox() autoRunSegmentationCheckBox.checked = False autoRunSegmentationCheckBox.toolTip = ( "Enable this option to auto run segmentation if pre-trained model exists when Next Sample is fetched" ) groupLayout.addRow("Auto-Run Pre-Trained Model:", autoRunSegmentationCheckBox) parent.registerProperty( "MONAILabel/autoRunSegmentationOnNextSample", 
ctk.ctkBooleanMapper(autoRunSegmentationCheckBox, "checked", str(qt.SIGNAL("toggled(bool)"))), "valueAsInt", str(qt.SIGNAL("valueAsIntChanged(int)")), ) autoFetchNextSampleCheckBox = qt.QCheckBox() autoFetchNextSampleCheckBox.checked = False autoFetchNextSampleCheckBox.toolTip = "Enable this option to fetch Next Sample after saving the label" groupLayout.addRow("Auto-Fetch Next Sample:", autoFetchNextSampleCheckBox) parent.registerProperty( "MONAILabel/autoFetchNextSample", ctk.ctkBooleanMapper(autoFetchNextSampleCheckBox, "checked", str(qt.SIGNAL("toggled(bool)"))), "valueAsInt", str(qt.SIGNAL("valueAsIntChanged(int)")), ) autoUpdateModelCheckBox = qt.QCheckBox() autoUpdateModelCheckBox.checked = True autoUpdateModelCheckBox.toolTip = "Enable this option to auto update model after submitting the label" groupLayout.addRow("Auto-Update Model:", autoUpdateModelCheckBox) parent.registerProperty( "MONAILabel/autoUpdateModel", ctk.ctkBooleanMapper(autoUpdateModelCheckBox, "checked", str(qt.SIGNAL("toggled(bool)"))), "valueAsInt", str(qt.SIGNAL("valueAsIntChanged(int)")), ) askForUserNameCheckBox = qt.QCheckBox() askForUserNameCheckBox.checked = False askForUserNameCheckBox.toolTip = "Enable this option to ask for the user name every time the MONAILabel extension is loaded for the first time" groupLayout.addRow("Ask For User Name:", askForUserNameCheckBox) parent.registerProperty( "MONAILabel/askForUserName", ctk.ctkBooleanMapper(askForUserNameCheckBox, "checked", str(qt.SIGNAL("toggled(bool)"))), "valueAsInt", str(qt.SIGNAL("valueAsIntChanged(int)")), ) allowOverlapCheckBox = qt.QCheckBox() allowOverlapCheckBox.checked = False allowOverlapCheckBox.toolTip = "Enable this option to allow overlapping segmentations" groupLayout.addRow("Allow Overlapping Segmentations:", allowOverlapCheckBox) parent.registerProperty( "MONAILabel/allowOverlappingSegments", ctk.ctkBooleanMapper(allowOverlapCheckBox, "checked", str(qt.SIGNAL("toggled(bool)"))), "valueAsInt", str(qt.SIGNAL("valueAsIntChanged(int)")), ) allowOverlapCheckBox.connect("toggled(bool)", self.onUpdateAllowOverlap) developerModeCheckBox = qt.QCheckBox() developerModeCheckBox.checked = False developerModeCheckBox.toolTip = "Enable this option to find options tab etc..." groupLayout.addRow("Developer Mode:", developerModeCheckBox) parent.registerProperty( "MONAILabel/developerMode", ctk.ctkBooleanMapper(developerModeCheckBox, "checked", str(qt.SIGNAL("toggled(bool)"))), "valueAsInt", str(qt.SIGNAL("valueAsIntChanged(int)")), ) vBoxLayout.addWidget(groupBox) vBoxLayout.addStretch(1) def onUpdateAllowOverlap(self): if slicer.util.settingsValue("MONAILabel/allowOverlappingSegments", True, converter=slicer.util.toBool): if slicer.util.settingsValue("MONAILabel/fileExtension", None) != ".seg.nrrd": slicer.util.warningDisplay( "Overlapping segmentations are only availabel with the '.seg.nrrd' file extension! Consider changing MONAILabel file extension." ) class MONAILabelSettingsPanel(ctk.ctkSettingsPanel): def __init__(self, *args, **kwargs): ctk.ctkSettingsPanel.__init__(self, *args, **kwargs) self.ui = _ui_MONAILabelSettingsPanel(self) class MONAILabelWidget(ScriptedLoadableModuleWidget, VTKObservationMixin): def __init__(self, parent=None): """ Called when the user opens the module the first time and the widget is initialized. 
""" ScriptedLoadableModuleWidget.__init__(self, parent) VTKObservationMixin.__init__(self) # needed for parameter node observation self.logic = None self._parameterNode = None self._volumeNode = None self._segmentNode = None self._volumeNodes = [] self._updatingGUIFromParameterNode = False self._scribblesEditorWidget = None self.info = {} self.models = OrderedDict() self.trainers = OrderedDict() self.config = OrderedDict() self.current_sample = None self.samples = {} self.state = { "SegmentationModel": "", "DeepgrowModel": "", "ScribblesMethod": "", "CurrentStrategy": "", "CurrentTrainer": "", } self.file_ext = ".nii.gz" self.dgPositiveFiducialNode = None self.dgPositiveFiducialNodeObservers = [] self.dgNegativeFiducialNode = None self.dgNegativeFiducialNodeObservers = [] self.ignoreFiducialNodeAddEvent = False self.progressBar = None self.tmpdir = None self.timer = None self.scribblesMode = None self.multi_label = False def setup(self): """ Called when the user opens the module the first time and the widget is initialized. """ ScriptedLoadableModuleWidget.setup(self) # Load widget from .ui file (created by Qt Designer). # Additional widgets can be instantiated manually and added to self.layout. uiWidget = slicer.util.loadUI(self.resourcePath("UI/MONAILabel.ui")) self.layout.addWidget(uiWidget) self.ui = slicer.util.childWidgetVariables(uiWidget) # Set scene in MRML widgets. Make sure that in Qt designer the top-level qMRMLWidget's # "mrmlSceneChanged(vtkMRMLScene*)" signal in is connected to each MRML widget's. # "setMRMLScene(vtkMRMLScene*)" slot. uiWidget.setMRMLScene(slicer.mrmlScene) # These connections ensure that we update parameter node when scene is closed self.addObserver(slicer.mrmlScene, slicer.mrmlScene.StartCloseEvent, self.onSceneStartClose) self.addObserver(slicer.mrmlScene, slicer.mrmlScene.EndCloseEvent, self.onSceneEndClose) self.addObserver(slicer.mrmlScene, slicer.mrmlScene.NodeAddedEvent, self.onSceneEndImport) # Create logic class. Logic implements all computations that should be possible to run # in batch mode, without a graphical user interface. 
self.tmpdir = slicer.util.tempDirectory("slicer-monai-label") self.logic = MONAILabelLogic(self.tmpdir) # Set icons and tune widget properties self.ui.serverComboBox.lineEdit().setPlaceholderText("enter server address or leave empty to use default") self.ui.fetchServerInfoButton.setIcon(self.icon("refresh-icon.png")) self.ui.segmentationButton.setIcon(self.icon("segment.png")) self.ui.nextSampleButton.setIcon(self.icon("segment.png")) self.ui.saveLabelButton.setIcon(self.icon("save.png")) self.ui.trainingButton.setIcon(self.icon("training.png")) self.ui.stopTrainingButton.setIcon(self.icon("stop.png")) self.ui.uploadImageButton.setIcon(self.icon("upload.svg")) self.ui.importLabelButton.setIcon(self.icon("download.png")) self.ui.dgPositiveFiducialPlacementWidget.setMRMLScene(slicer.mrmlScene) self.ui.dgPositiveFiducialPlacementWidget.placeButton().toolTip = "Select +ve points" self.ui.dgPositiveFiducialPlacementWidget.buttonsVisible = False self.ui.dgPositiveFiducialPlacementWidget.placeButton().show() self.ui.dgPositiveFiducialPlacementWidget.deleteButton().show() self.ui.dgNegativeFiducialPlacementWidget.setMRMLScene(slicer.mrmlScene) self.ui.dgNegativeFiducialPlacementWidget.placeButton().toolTip = "Select -ve points" self.ui.dgNegativeFiducialPlacementWidget.buttonsVisible = False self.ui.dgNegativeFiducialPlacementWidget.placeButton().show() self.ui.dgNegativeFiducialPlacementWidget.deleteButton().show() self.ui.dgUpdateButton.setIcon(self.icon("segment.png")) # Connections self.ui.fetchServerInfoButton.connect("clicked(bool)", self.onClickFetchInfo) self.ui.serverComboBox.connect("currentIndexChanged(int)", self.onClickFetchInfo) self.ui.segmentationModelSelector.connect("currentIndexChanged(int)", self.updateParameterNodeFromGUI) self.ui.segmentationButton.connect("clicked(bool)", self.onClickSegmentation) self.ui.deepgrowModelSelector.connect("currentIndexChanged(int)", self.updateParameterNodeFromGUI) self.ui.nextSampleButton.connect("clicked(bool)", self.onNextSampleButton) self.ui.trainingButton.connect("clicked(bool)", self.onTraining) self.ui.stopTrainingButton.connect("clicked(bool)", self.onStopTraining) self.ui.saveLabelButton.connect("clicked(bool)", self.onSaveLabel) self.ui.uploadImageButton.connect("clicked(bool)", self.onUploadImage) self.ui.importLabelButton.connect("clicked(bool)", self.onImportLabel) self.ui.labelComboBox.connect("currentIndexChanged(int)", self.onSelectLabel) self.ui.dgUpdateButton.connect("clicked(bool)", self.onUpdateDeepgrow) self.ui.dgUpdateCheckBox.setStyleSheet("padding-left: 10px;") # Scribbles # brush and eraser icon from: https://tablericons.com/ self.ui.scribblesMethodSelector.connect("currentIndexChanged(int)", self.updateParameterNodeFromGUI) self.ui.paintScribblesButton.setIcon(self.icon("paint.png")) self.ui.paintScribblesButton.setToolTip("Paint scribbles for selected scribble layer") self.ui.eraseScribblesButton.setIcon(self.icon("eraser.png")) self.ui.eraseScribblesButton.setToolTip("Erase scribbles for selected scribble layer") self.ui.updateScribblesButton.setIcon(self.icon("segment.png")) self.ui.updateScribblesButton.setToolTip( "Update label by sending scribbles to server to apply selected post processing method" ) self.ui.brushSizeSlider.connect("valueChanged(double)", self.updateBrushSize) self.ui.brushSizeSlider.setToolTip("Change brush size for scribbles tool") self.ui.brush3dCheckbox.stateChanged.connect(self.on3dBrushCheckbox) self.ui.brush3dCheckbox.setToolTip("Use 3D brush to paint/erase in multiple slices in 3D") 
self.ui.updateScribblesButton.clicked.connect(self.onUpdateScribbles) self.ui.paintScribblesButton.clicked.connect(self.onPaintScribbles) self.ui.eraseScribblesButton.clicked.connect(self.onEraseScribbles) self.ui.scribblesLabelSelector.connect("currentIndexChanged(int)", self.onSelectScribblesLabel) # creating editable combo box self.ui.scribblesLabelSelector.addItem(self.icon("fg_green.png"), "Foreground") self.ui.scribblesLabelSelector.addItem(self.icon("bg_red.png"), "Background") self.ui.scribblesLabelSelector.setCurrentIndex(0) # start with scribbles section disabled self.ui.scribblesCollapsibleButton.setEnabled(False) self.ui.scribblesCollapsibleButton.collapsed = True # embedded segment editor self.ui.embeddedSegmentEditorWidget.setMRMLScene(slicer.mrmlScene) self.ui.embeddedSegmentEditorWidget.setSegmentationNodeSelectorVisible(False) self.ui.embeddedSegmentEditorWidget.setMasterVolumeNodeSelectorVisible(False) self.initializeParameterNode() self.updateServerUrlGUIFromSettings() # self.onClickFetchInfo() if slicer.util.settingsValue("MONAILabel/askForUserName", False, converter=slicer.util.toBool): text = qt.QInputDialog().getText( self.parent, "User Name", "Please enter your name:", qt.QLineEdit.Normal, slicer.util.settingsValue("MONAILabel/clientId", None), ) if text: settings = qt.QSettings() settings.setValue("MONAILabel/clientId", text) def cleanup(self): self.removeObservers() shutil.rmtree(self.tmpdir, ignore_errors=True) def enter(self): self.initializeParameterNode() if self._segmentNode: self.updateGUIFromParameterNode() def exit(self): self.removeObserver(self._parameterNode, vtk.vtkCommand.ModifiedEvent, self.updateGUIFromParameterNode) def onSceneStartClose(self, caller, event): self.state = { "SegmentationModel": self.ui.segmentationModelSelector.currentText, "DeepgrowModel": self.ui.deepgrowModelSelector.currentText, "ScribblesMethod": self.ui.scribblesMethodSelector.currentText, "CurrentStrategy": self.ui.strategyBox.currentText, "CurrentTrainer": self.ui.trainerBox.currentText, } self._volumeNode = None self._segmentNode = None self._volumeNodes.clear() self.setParameterNode(None) self.current_sample = None self.samples.clear() self.resetFiducial( self.ui.dgPositiveFiducialPlacementWidget, self.dgPositiveFiducialNode, self.dgPositiveFiducialNodeObservers ) self.dgPositiveFiducialNode = None self.resetFiducial( self.ui.dgNegativeFiducialPlacementWidget, self.dgNegativeFiducialNode, self.dgNegativeFiducialNodeObservers ) self.dgNegativeFiducialNode = None self.onClearScribbles() def resetFiducial(self, fiducialWidget, fiducialNode, fiducialNodeObservers): if fiducialWidget.placeModeEnabled: fiducialWidget.setPlaceModeEnabled(False) if fiducialNode: slicer.mrmlScene.RemoveNode(fiducialNode) self.removeFiducialNodeObservers(fiducialNode, fiducialNodeObservers) def onSceneEndClose(self, caller, event): if self.parent.isEntered: self.initializeParameterNode() def onSceneEndImport(self, caller, event): if not self._volumeNode: self.updateGUIFromParameterNode() def initializeParameterNode(self): self.setParameterNode(self.logic.getParameterNode()) # Select default input nodes if nothing is selected yet to save a few clicks for the user if not self._parameterNode.GetNodeReference("InputVolume"): firstVolumeNode = slicer.mrmlScene.GetFirstNodeByClass("vtkMRMLScalarVolumeNode") if firstVolumeNode: self._parameterNode.SetNodeReferenceID("InputVolume", firstVolumeNode.GetID()) def setParameterNode(self, inputParameterNode): if inputParameterNode: 
self.logic.setDefaultParameters(inputParameterNode) if self._parameterNode is not None: self.removeObserver(self._parameterNode, vtk.vtkCommand.ModifiedEvent, self.updateGUIFromParameterNode) self._parameterNode = inputParameterNode if self._parameterNode is not None: self.addObserver(self._parameterNode, vtk.vtkCommand.ModifiedEvent, self.updateGUIFromParameterNode) # Initial GUI update self.updateGUIFromParameterNode() def monitorTraining(self): status = self.isTrainingRunning(check_only=False) if status and status.get("status") == "RUNNING": info = self.logic.info() train_stats = info.get("train_stats") if not train_stats: return train_stats = next(iter(train_stats.values())) if train_stats else train_stats current = 0 if train_stats.get("total_time") else train_stats.get("epoch", 1) total = train_stats.get("total_epochs", 1) percent = max(1, 100 * current / total) if self.ui.trainingProgressBar.value != percent: self.ui.trainingProgressBar.setValue(percent) self.ui.trainingProgressBar.setToolTip(f"{current}/{total} epoch is completed") dice = train_stats.get("best_metric", 0) self.updateAccuracyBar(dice) return print("Training completed") self.ui.trainingProgressBar.setValue(100) self.timer.stop() self.timer = None self.ui.trainingProgressBar.setToolTip(f"Training: {status.get('status', 'DONE')}") self.ui.trainingButton.setEnabled(True) self.ui.stopTrainingButton.setEnabled(False) self.fetchInfo() def updateGUIFromParameterNode(self, caller=None, event=None): if self._parameterNode is None or self._updatingGUIFromParameterNode: return # Make sure GUI changes do not call updateParameterNodeFromGUI (it could cause infinite loop) self._updatingGUIFromParameterNode = True file_ext = slicer.util.settingsValue("MONAILabel/fileExtension", self.file_ext) self.file_ext = file_ext if file_ext else self.file_ext # Update node selectors and sliders self.ui.inputSelector.clear() for v in self._volumeNodes: self.ui.inputSelector.addItem(v.GetName()) self.ui.inputSelector.setToolTip(self.current_sample.get("name", "") if self.current_sample else "") if self._volumeNode: self.ui.inputSelector.setCurrentIndex(self.ui.inputSelector.findText(self._volumeNode.GetName())) self.ui.inputSelector.setEnabled(False) # Allow only one active scene self.ui.uploadImageButton.setEnabled(False) if self.info and slicer.mrmlScene.GetFirstNodeByClass("vtkMRMLScalarVolumeNode") and self._volumeNode is None: self._volumeNode = slicer.mrmlScene.GetFirstNodeByClass("vtkMRMLScalarVolumeNode") self.initSample({"id": self._volumeNode.GetName(), "session": True}, autosegment=False) self.ui.inputSelector.setEnabled(False) self.ui.uploadImageButton.setEnabled(self.current_sample and self.current_sample.get("session")) self.updateSelector(self.ui.segmentationModelSelector, ["segmentation"], "SegmentationModel", 0) self.updateSelector(self.ui.deepgrowModelSelector, ["deepgrow", "deepedit"], "DeepgrowModel", 0) self.updateSelector(self.ui.scribblesMethodSelector, ["scribbles"], "ScribblesMethod", 0) if self.models and [k for k, v in self.models.items() if v["type"] == "segmentation"]: self.ui.segmentationCollapsibleButton.collapsed = False if self.models and [k for k, v in self.models.items() if v["type"] in ("deepgrow", "deepedit")]: self.ui.deepgrowCollapsibleButton.collapsed = False if self.models and [k for k, v in self.models.items() if v["type"] == "scribbles"]: self.ui.scribblesCollapsibleButton.collapsed = False self.ui.labelComboBox.clear() if self._segmentNode: segmentation = self._segmentNode.GetSegmentation() 
totalSegments = segmentation.GetNumberOfSegments() segmentIds = [segmentation.GetNthSegmentID(i) for i in range(totalSegments)] for idx, segmentId in enumerate(segmentIds): segment = segmentation.GetSegment(segmentId) label = segment.GetName() if label in ["foreground_scribbles", "background_scribbles"]: continue self.ui.labelComboBox.addItem(label) else: for label in self.info.get("labels", {}): self.ui.labelComboBox.addItem(label) currentLabel = self._parameterNode.GetParameter("CurrentLabel") idx = self.ui.labelComboBox.findText(currentLabel) if currentLabel else 0 idx = 0 if idx < 0 < self.ui.labelComboBox.count else idx self.ui.labelComboBox.setCurrentIndex(idx) self.ui.appComboBox.clear() self.ui.appComboBox.addItem(self.info.get("name", "")) datastore_stats = self.info.get("datastore", {}) current = datastore_stats.get("completed", 0) total = datastore_stats.get("total", 0) self.ui.activeLearningProgressBar.setValue(current / max(total, 1) * 100) self.ui.activeLearningProgressBar.setToolTip(f"{current}/{total} samples are labeled") train_stats = self.info.get("train_stats", {}) train_stats = next(iter(train_stats.values())) if train_stats else train_stats dice = train_stats.get("best_metric", 0) self.updateAccuracyBar(dice) self.ui.strategyBox.clear() for strategy in self.info.get("strategies", {}): self.ui.strategyBox.addItem(strategy) currentStrategy = self._parameterNode.GetParameter("CurrentStrategy") currentStrategy = currentStrategy if currentStrategy else self.state["CurrentStrategy"] self.ui.strategyBox.setCurrentIndex(self.ui.strategyBox.findText(currentStrategy) if currentStrategy else 0) self.ui.trainerBox.clear() trainers = self.info.get("trainers", {}) if trainers: self.ui.trainerBox.addItem("ALL") for t in trainers: self.ui.trainerBox.addItem(t) currentTrainer = self._parameterNode.GetParameter("CurrentTrainer") currentTrainer = currentTrainer if currentTrainer else self.state["CurrentTrainer"] self.ui.trainerBox.setCurrentIndex(self.ui.trainerBox.findText(currentTrainer) if currentTrainer else 0) developer_mode = slicer.util.settingsValue("MONAILabel/developerMode", True, converter=slicer.util.toBool) self.ui.optionsCollapsibleButton.setVisible(developer_mode) # Enable/Disable self.ui.nextSampleButton.setEnabled(self.ui.strategyBox.count) is_training_running = True if self.info and self.isTrainingRunning() else False self.ui.trainingButton.setEnabled(self.info and not is_training_running and current) self.ui.stopTrainingButton.setEnabled(is_training_running) if is_training_running and self.timer is None: self.timer = qt.QTimer() self.timer.setInterval(5000) self.timer.connect("timeout()", self.monitorTraining) self.timer.start() self.ui.segmentationButton.setEnabled( self.ui.segmentationModelSelector.currentText and self._volumeNode is not None ) self.ui.saveLabelButton.setEnabled(self._segmentNode is not None) self.ui.importLabelButton.setEnabled(self._segmentNode is not None) # Create empty markup fiducial node for deep grow +ve and -ve if self._segmentNode: if not self.dgPositiveFiducialNode: self.dgPositiveFiducialNode, self.dgPositiveFiducialNodeObservers = self.createFiducialNode( "P", self.onDeepGrowFiducialNodeModified, [0.5, 1, 0.5] ) self.ui.dgPositiveFiducialPlacementWidget.setCurrentNode(self.dgPositiveFiducialNode) self.ui.dgPositiveFiducialPlacementWidget.setPlaceModeEnabled(False) if not self.dgNegativeFiducialNode: self.dgNegativeFiducialNode, self.dgNegativeFiducialNodeObservers = self.createFiducialNode( "N", self.onDeepGrowFiducialNodeModified, 
[0.5, 0.5, 1] ) self.ui.dgNegativeFiducialPlacementWidget.setCurrentNode(self.dgNegativeFiducialNode) self.ui.dgNegativeFiducialPlacementWidget.setPlaceModeEnabled(False) self.ui.scribblesCollapsibleButton.setEnabled(self.ui.scribblesMethodSelector.count) self.ui.scribblesCollapsibleButton.collapsed = False self.ui.dgPositiveFiducialPlacementWidget.setEnabled(self.ui.deepgrowModelSelector.currentText) self.ui.dgNegativeFiducialPlacementWidget.setEnabled(self.ui.deepgrowModelSelector.currentText) self.multi_label = "background" in self.info.get("labels", []) if self.multi_label: self.ui.dgLabelBackground.hide() self.ui.dgNegativeFiducialPlacementWidget.hide() self.ui.freezeUpdateCheckBox.show() self.ui.dgLabelForeground.setText("Landmarks:") else: self.ui.dgNegativeFiducialPlacementWidget.show() self.ui.freezeUpdateCheckBox.hide() self.ui.dgLabelForeground.setText("Foreground:") self.ui.dgUpdateCheckBox.setEnabled(self.ui.deepgrowModelSelector.currentText and self._segmentNode) self.ui.dgUpdateButton.setEnabled(self.ui.deepgrowModelSelector.currentText and self._segmentNode) self.ui.embeddedSegmentEditorWidget.setMRMLSegmentEditorNode( slicer.mrmlScene.GetFirstNodeByClass("vtkMRMLSegmentEditorNode") ) # All the GUI updates are done self._updatingGUIFromParameterNode = False def updateParameterNodeFromGUI(self, caller=None, event=None): if self._parameterNode is None or self._updatingGUIFromParameterNode: return wasModified = self._parameterNode.StartModify() # Modify all properties in a single batch segmentationModelIndex = self.ui.segmentationModelSelector.currentIndex if segmentationModelIndex >= 0: segmentationModel = self.ui.segmentationModelSelector.itemText(segmentationModelIndex) self._parameterNode.SetParameter("SegmentationModel", segmentationModel) deepgrowModelIndex = self.ui.deepgrowModelSelector.currentIndex if deepgrowModelIndex >= 0: deepgrowModel = self.ui.deepgrowModelSelector.itemText(deepgrowModelIndex) self._parameterNode.SetParameter("DeepgrowModel", deepgrowModel) scribblesMethodIndex = self.ui.scribblesMethodSelector.currentIndex if scribblesMethodIndex >= 0: scribblesMethod = self.ui.scribblesMethodSelector.itemText(scribblesMethodIndex) self._parameterNode.SetParameter("ScribblesMethod", scribblesMethod) currentLabelIndex = self.ui.labelComboBox.currentIndex if currentLabelIndex >= 0: currentLabel = self.ui.labelComboBox.itemText(currentLabelIndex) self._parameterNode.SetParameter("CurrentLabel", currentLabel) currentStrategyIndex = self.ui.strategyBox.currentIndex if currentStrategyIndex >= 0: currentStrategy = self.ui.strategyBox.itemText(currentStrategyIndex) self._parameterNode.SetParameter("CurrentStrategy", currentStrategy) currentTrainerIndex = self.ui.trainerBox.currentIndex if currentTrainerIndex >= 0: currentTrainer = self.ui.trainerBox.itemText(currentTrainerIndex) self._parameterNode.SetParameter("CurrentTrainer", currentTrainer) self._parameterNode.EndModify(wasModified) def updateSelector(self, selector, model_types, param, defaultIndex=0): wasSelectorBlocked = selector.blockSignals(True) selector.clear() for model_name, model in self.models.items(): if model["type"] in model_types: selector.addItem(model_name) selector.setItemData(selector.count - 1, model["description"], qt.Qt.ToolTipRole) model = self._parameterNode.GetParameter(param) model = model if model else self.state.get(param, "") modelIndex = selector.findText(model) modelIndex = defaultIndex if modelIndex < 0 < selector.count else modelIndex selector.setCurrentIndex(modelIndex) try: 
modelInfo = self.models[model] selector.setToolTip(modelInfo["description"]) except: selector.setToolTip("") selector.blockSignals(wasSelectorBlocked) def updateConfigTable(self): table = self.ui.configTable table.clear() headers = ["section", "name", "key", "value"] table.setColumnCount(len(headers)) table.setHorizontalHeaderLabels(headers) table.setColumnWidth(0, 50) config = copy.deepcopy(self.info) infer = config.get("models", {}) train = config.get("trainers", {}) activelearning = config.get("strategies", {}) scoring = config.get("scoring", {}) row_count = 0 config = {"infer": infer, "train": train, "activelearning": activelearning, "scoring": scoring} for c in config.values(): row_count += sum([len(c[k].get("config", {})) for k in c.keys()]) # print(f"Total rows: {row_count}") table.setRowCount(row_count) n = 0 for section in config: if not config[section]: continue c_section = config[section] l_section = sum([len(c_section[k].get("config", {})) for k in c_section.keys()]) if not l_section: continue # print(f"{n} => l_section = {l_section}") if l_section: table.setSpan(n, 0, l_section, 1) for name in c_section: c_name = c_section[name] l_name = len(c_name.get("config", {})) if not l_name: continue # print(f"{n} => l_name = {l_name}") if l_name: table.setSpan(n, 1, l_name, 1) for key, val in c_name.get("config", {}).items(): item = qt.QTableWidgetItem(section) item.setFlags(item.flags() & ~qt.Qt.ItemIsEditable) table.setItem(n, 0, item) item = qt.QTableWidgetItem(name) table.setItem(n, 1, item) item.setFlags(item.flags() & ~qt.Qt.ItemIsEditable) item = qt.QTableWidgetItem(key) table.setItem(n, 2, item) item.setFlags(item.flags() & ~qt.Qt.ItemIsEditable) if isinstance(val, dict) or isinstance(val, list): combo = qt.QComboBox() for m, v in enumerate(val): combo.addItem(v) combo.setCurrentIndex(0) table.setCellWidget(n, 3, combo) elif isinstance(val, bool): checkbox = qt.QCheckBox() checkbox.setChecked(val) table.setCellWidget(n, 3, checkbox) else: table.setItem(n, 3, qt.QTableWidgetItem(str(val) if val else "")) # print(f"{n} => {section} => {name} => {key} => {val}") n = n + 1 def updateAccuracyBar(self, dice): self.ui.accuracyProgressBar.setValue(dice * 100) css = ["stop: 0 red"] if dice > 0.5: css.append(f"stop: {0.5 / dice} orange") if dice > 0.6: css.append(f"stop: {0.6 / dice} yellow") if dice > 0.7: css.append(f"stop: {0.7 / dice} lightgreen") if dice > 0.8: css.append(f"stop: {0.8 / dice} green") if dice > 0.9: css.append(f"stop: {0.9 / dice} darkgreen") self.ui.accuracyProgressBar.setStyleSheet( "QProgressBar {text-align: center;} " "QProgressBar::chunk {background-color: " "qlineargradient(x0: 0, x2: 1, " + ",".join(css) + ")}" ) self.ui.accuracyProgressBar.setToolTip(f"Accuracy: {dice:.4f}") def getParamsFromConfig(self, filter, filter2=None): mapping = {"infer": "models", "train": "trainers", "activelearning": "strategies", "scoring": "scoring"} config = {} for row in range(self.ui.configTable.rowCount): section = str(self.ui.configTable.item(row, 0).text()) name = str(self.ui.configTable.item(row, 1).text()) key = str(self.ui.configTable.item(row, 2).text()) value = self.ui.configTable.item(row, 3) if value is None: value = self.ui.configTable.cellWidget(row, 3) value = value.checked if isinstance(value, qt.QCheckBox) else value.currentText else: value = str(value.text()) v = self.info.get(mapping.get(section, ""), {}).get(name, {}).get("config", {}).get(key, {}) if isinstance(v, int): value = int(value) if value else 0 elif isinstance(v, float): value = float(value) if 
value else 0.0 # print(f"{section} => {name} => {key} => {value}") if config.get(section) is None: config[section] = {} if config[section].get(name) is None: config[section][name] = {} config[section][name][key] = value # print(f"row: {row}, section: {section}, name: {name}, value: {value}, type: {type(v)}") res = config.get(filter, {}) res = res.get(filter2, {}) if filter2 else res return res def onDeepGrowFiducialNodeModified(self, observer, eventid): logging.debug("Deepgrow Point Event!!") if self.ignoreFiducialNodeAddEvent: return markupsNode = observer movingMarkupIndex = markupsNode.GetDisplayNode().GetActiveControlPoint() logging.debug("Markup point added; point ID = {}".format(movingMarkupIndex)) current_point = self.getFiducialPointXYZ(markupsNode, movingMarkupIndex) if not self.ui.dgUpdateCheckBox.checked: self.onClickDeepgrow(current_point, skip_infer=True) return self.onClickDeepgrow(current_point) self.ignoreFiducialNodeAddEvent = True self.onEditFiducialPoints(self.dgPositiveFiducialNode, "MONAILabel.ForegroundPoints") self.onEditFiducialPoints(self.dgNegativeFiducialNode, "MONAILabel.BackgroundPoints") self.ignoreFiducialNodeAddEvent = False def getFiducialPointsXYZ(self, fiducialNode, name): v = self._volumeNode RasToIjkMatrix = vtk.vtkMatrix4x4() v.GetRASToIJKMatrix(RasToIjkMatrix) point_set = [] n = fiducialNode.GetNumberOfFiducials() for i in range(n): coord = [0.0, 0.0, 0.0] fiducialNode.GetNthFiducialPosition(i, coord) world = [0, 0, 0, 0] fiducialNode.GetNthFiducialWorldCoordinates(i, world) p_Ras = [coord[0], coord[1], coord[2], 1.0] p_Ijk = RasToIjkMatrix.MultiplyDoublePoint(p_Ras) p_Ijk = [round(i) for i in p_Ijk] logging.debug("RAS: {}; WORLD: {}; IJK: {}".format(coord, world, p_Ijk)) point_set.append(p_Ijk[0:3]) logging.info("{} => Current Fiducials-Points: {}".format(name, point_set)) return point_set def getFiducialPointXYZ(self, fiducialNode, index): v = self._volumeNode RasToIjkMatrix = vtk.vtkMatrix4x4() v.GetRASToIJKMatrix(RasToIjkMatrix) coord = [0.0, 0.0, 0.0] fiducialNode.GetNthFiducialPosition(index, coord) world = [0, 0, 0, 0] fiducialNode.GetNthFiducialWorldCoordinates(index, world) p_Ras = [coord[0], coord[1], coord[2], 1.0] p_Ijk = RasToIjkMatrix.MultiplyDoublePoint(p_Ras) p_Ijk = [round(i) for i in p_Ijk] logging.debug("RAS: {}; WORLD: {}; IJK: {}".format(coord, world, p_Ijk)) return p_Ijk[0:3] def onEditFiducialPoints(self, fiducialNode, tagName): if fiducialNode is None: return fiducialNode.RemoveAllMarkups() segmentId, segment = self.currentSegment() if segment and segmentId: v = self._volumeNode IjkToRasMatrix = vtk.vtkMatrix4x4() v.GetIJKToRASMatrix(IjkToRasMatrix) fPosStr = vtk.mutable("") segment.GetTag(tagName, fPosStr) pointset = str(fPosStr) logging.debug("{} => {} Fiducial points are: {}".format(segmentId, segment.GetName(), pointset)) if fPosStr is not None and len(pointset) > 0: points = json.loads(pointset) for p in points: p_Ijk = [p[0], p[1], p[2], 1.0] p_Ras = IjkToRasMatrix.MultiplyDoublePoint(p_Ijk) logging.debug("Add Fiducial: {} => {}".format(p_Ijk, p_Ras)) fiducialNode.AddFiducialFromArray(p_Ras[0:3]) def currentSegment(self): segmentation = self._segmentNode.GetSegmentation() segmentId = segmentation.GetSegmentIdBySegmentName(self.ui.labelComboBox.currentText) segment = segmentation.GetSegment(segmentId) logging.debug("Current SegmentID: {}; Segment: {}".format(segmentId, segment)) return segmentId, segment def onSelectLabel(self, caller=None, event=None): self.updateParameterNodeFromGUI(caller, event) 
self.ignoreFiducialNodeAddEvent = True self.onEditFiducialPoints(self.dgPositiveFiducialNode, "MONAILabel.ForegroundPoints") self.onEditFiducialPoints(self.dgNegativeFiducialNode, "MONAILabel.BackgroundPoints") self.ignoreFiducialNodeAddEvent = False def icon(self, name="MONAILabel.png"): # It should not be necessary to modify this method iconPath = os.path.join(os.path.dirname(__file__), "Resources", "Icons", name) if os.path.exists(iconPath): return qt.QIcon(iconPath) return qt.QIcon() def updateServerSettings(self): self.logic.setServer(self.serverUrl()) self.logic.setClientId(slicer.util.settingsValue("MONAILabel/clientId", "user-xyz")) self.saveServerUrl() def serverUrl(self): serverUrl = self.ui.serverComboBox.currentText if not serverUrl: serverUrl = "http://127.0.0.1:8000" return serverUrl.rstrip("/") def saveServerUrl(self): self.updateParameterNodeFromGUI() # Save selected server URL settings = qt.QSettings() serverUrl = self.ui.serverComboBox.currentText settings.setValue("MONAILabel/serverUrl", serverUrl) # Save current server URL to the top of history serverUrlHistory = settings.value("MONAILabel/serverUrlHistory") if serverUrlHistory: serverUrlHistory = serverUrlHistory.split(";") else: serverUrlHistory = [] try: serverUrlHistory.remove(serverUrl) except ValueError: pass serverUrlHistory.insert(0, serverUrl) serverUrlHistory = serverUrlHistory[:10] # keep up to first 10 elements settings.setValue("MONAILabel/serverUrlHistory", ";".join(serverUrlHistory)) self.updateServerUrlGUIFromSettings() def onClickFetchInfo(self): self.fetchInfo() self.updateConfigTable() def fetchInfo(self, showInfo=False): if not self.logic: return start = time.time() try: self.updateServerSettings() info = self.logic.info() self.info = info if self.info.get("config"): slicer.util.errorDisplay( "Please upgrade the monai server to latest version", detailedText=traceback.format_exc(), ) return except: slicer.util.errorDisplay( "Failed to fetch models from remote server. 
" "Make sure server address is correct and <server_uri>/info/ " "is accessible in browser", detailedText=traceback.format_exc(), ) return self.models.clear() self.config = info.get("config", {}) model_count = {} models = info.get("models", {}) for k, v in models.items(): model_type = v.get("type", "segmentation") model_count[model_type] = model_count.get(model_type, 0) + 1 logging.debug("{} = {}".format(k, model_type)) self.models[k] = v self.updateGUIFromParameterNode() msg = "" msg += "-----------------------------------------------------\t\n" msg += "Total Models Available: \t" + str(len(models)) + "\t\n" msg += "-----------------------------------------------------\t\n" for model_type in model_count.keys(): msg += model_type.capitalize() + " Models: \t" + str(model_count[model_type]) + "\t\n" msg += "-----------------------------------------------------\t\n" if showInfo: qt.QMessageBox.information(slicer.util.mainWindow(), "MONAI Label", msg) logging.info(msg) logging.info("Time consumed by fetch info: {0:3.1f}".format(time.time() - start)) def setProgressBarLabelText(self, label): if not self.progressBar: self.progressBar = slicer.util.createProgressDialog(windowTitle="Wait...", maximum=100) self.progressBar.labelText = label def reportProgress(self, progressPercentage): if not self.progressBar: self.progressBar = slicer.util.createProgressDialog(windowTitle="Wait...", maximum=100) self.progressBar.show() self.progressBar.activateWindow() self.progressBar.setValue(progressPercentage) slicer.app.processEvents() def onTraining(self): start = time.time() status = None try: qt.QApplication.setOverrideCursor(qt.Qt.WaitCursor) self.updateServerSettings() model = self.ui.trainerBox.currentText model = model if model and model != "ALL" else None params = self.getParamsFromConfig("train", model) status = self.logic.train_start(model, params) self.ui.trainingProgressBar.setValue(1) self.ui.trainingProgressBar.setToolTip("Training: STARTED") time.sleep(1) self.updateGUIFromParameterNode() except: slicer.util.errorDisplay( "Failed to run training in MONAI Label Server", detailedText=traceback.format_exc() ) finally: qt.QApplication.restoreOverrideCursor() if status: msg = "ID: {}\nStatus: {}\nStart Time: {}\n".format( status.get("id"), status.get("status"), status.get("start_ts"), ) # slicer.util.infoDisplay(msg, detailedText=json.dumps(status, indent=2)) logging.info(msg) logging.info("Time consumed by training: {0:3.1f}".format(time.time() - start)) def onStopTraining(self): start = time.time() status = None if not slicer.util.confirmOkCancelDisplay( "This will kill/stop current Training task. Are you sure to continue?" 
): return try: qt.QApplication.setOverrideCursor(qt.Qt.WaitCursor) self.updateServerSettings() status = self.logic.train_stop() except: slicer.util.errorDisplay("Failed to stop Training Task", detailedText=traceback.format_exc()) finally: qt.QApplication.restoreOverrideCursor() if status: msg = "Status: {}\nStart Time: {}\nEnd Time: {}\nResult: {}".format( status.get("status"), status.get("start_ts"), status.get("end_ts"), status.get("result", status.get("details", [])[-1]), ) # slicer.util.infoDisplay(msg, detailedText=json.dumps(status, indent=2)) logging.info(msg) self.updateGUIFromParameterNode() logging.info("Time consumed by stop training: {0:3.1f}".format(time.time() - start)) def isTrainingRunning(self, check_only=True): if not self.logic: return False self.updateServerSettings() return self.logic.train_status(check_only) def onNextSampleButton(self): if not self.logic: return if self._volumeNode or len(slicer.util.getNodesByClass("vtkMRMLScalarVolumeNode")): if not slicer.util.confirmOkCancelDisplay( "This will close current scene. Please make sure you have saved your current work.\n" "Are you sure to continue?" ): return self.onClearScribbles() slicer.mrmlScene.Clear(0) start = time.time() try: qt.QApplication.setOverrideCursor(qt.Qt.WaitCursor) self.updateServerSettings() strategy = self.ui.strategyBox.currentText if not strategy: slicer.util.errorDisplay("No Strategy Found/Selected\t") return sample = self.logic.next_sample(strategy, self.getParamsFromConfig("activelearning", strategy)) logging.debug(sample) if not sample.get("id"): slicer.util.warningDisplay( "Unlabled Samples/Images Not Found at server. Instead you can load your own image." ) return if self.samples.get(sample["id"]) is not None: self.current_sample = self.samples[sample["id"]] name = self.current_sample["VolumeNodeName"] index = self.ui.inputSelector.findText(name) self.ui.inputSelector.setCurrentIndex(index) return logging.info(sample) image_id = sample["id"] image_file = sample.get("path") image_name = sample.get("name", image_id) node_name = sample.get("PatientID", sample.get("name", image_id))[-20:] checksum = sample.get("checksum") local_exists = image_file and os.path.exists(image_file) logging.info(f"Check if file exists/shared locally: {image_file} => {local_exists}") if local_exists: self._volumeNode = slicer.util.loadVolume(image_file) self._volumeNode.SetName(node_name) else: download_uri = f"{self.serverUrl()}/datastore/image?image={quote_plus(image_id)}" logging.info(download_uri) sampleDataLogic = SampleData.SampleDataLogic() self._volumeNode = sampleDataLogic.downloadFromURL( nodeNames=node_name, fileNames=image_name, uris=download_uri, checksums=checksum )[0] self.initSample(sample) except: slicer.util.errorDisplay( "Failed to fetch Sample from MONAI Label Server", detailedText=traceback.format_exc() ) finally: qt.QApplication.restoreOverrideCursor() self.updateGUIFromParameterNode() logging.info("Time consumed by next_sample: {0:3.1f}".format(time.time() - start)) def initSample(self, sample, autosegment=True): sample["VolumeNodeName"] = self._volumeNode.GetName() self.current_sample = sample self.samples[sample["id"]] = sample self._volumeNodes.append(self._volumeNode) # Create Empty Segments for all labels for this node self.createSegmentNode() segmentEditorWidget = slicer.modules.segmenteditor.widgetRepresentation().self().editor segmentEditorWidget.setSegmentationNode(self._segmentNode) segmentEditorWidget.setMasterVolumeNode(self._volumeNode) # check if user allows overlapping segments 
if slicer.util.settingsValue("MONAILabel/allowOverlappingSegments", False, converter=slicer.util.toBool): # set segment editor to allow overlaps slicer.util.getNodesByClass("vtkMRMLSegmentEditorNode")[0].SetOverwriteMode(2) if self.info.get("labels"): self.updateSegmentationMask(None, self.info.get("labels")) # Check if user wants to run auto-segmentation on new sample if autosegment and slicer.util.settingsValue( "MONAILabel/autoRunSegmentationOnNextSample", True, converter=slicer.util.toBool ): for label in self.info.get("labels", []): for name, model in self.models.items(): if label in model.get("labels", []): qt.QApplication.restoreOverrideCursor() self.ui.segmentationModelSelector.currentText = name self.onClickSegmentation() return def getPermissionForImageDataUpload(self): return slicer.util.confirmOkCancelDisplay( "Master volume - without any additional patient information -" " will be sent to remote data processing server: {0}.\n\n" "Click 'OK' to proceed with the segmentation.\n" "Click 'Cancel' to not upload any data and cancel segmentation.\n".format(self.serverUrl()), dontShowAgainSettingsKey="MONAILabel/showImageDataSendWarning", ) def onUploadImage(self, init_sample=True, session=False): volumeNode = slicer.mrmlScene.GetFirstNodeByClass("vtkMRMLScalarVolumeNode") image_id = volumeNode.GetName() if not self.getPermissionForImageDataUpload(): return False try: qt.QApplication.setOverrideCursor(qt.Qt.WaitCursor) in_file = tempfile.NamedTemporaryFile(suffix=self.file_ext, dir=self.tmpdir).name self.reportProgress(5) start = time.time() slicer.util.saveNode(volumeNode, in_file) logging.info("Saved Input Node into {0} in {1:3.1f}s".format(in_file, time.time() - start)) self.reportProgress(30) if session: self.current_sample["session_id"] = self.logic.create_session(in_file)["session_id"] else: self.logic.upload_image(in_file, image_id) self.current_sample["session"] = False self.reportProgress(100) self._volumeNode = volumeNode if init_sample: self.initSample({"id": image_id}, autosegment=False) qt.QApplication.restoreOverrideCursor() self.updateGUIFromParameterNode() return True except: self.reportProgress(100) qt.QApplication.restoreOverrideCursor() if session: slicer.util.errorDisplay( "Server Error:: Session creation Failed\nPlease upgrade to latest monailable version (> 0.2.0)", detailedText=traceback.format_exc(), ) else: slicer.util.errorDisplay("Failed to upload volume to Server", detailedText=traceback.format_exc()) return False def onImportLabel(self): if not self.ui.labelPathLineEdit.currentPath or not os.path.exists(self.ui.labelPathLineEdit.currentPath): slicer.util.warningDisplay("Label File not selected") return try: qt.QApplication.setOverrideCursor(qt.Qt.WaitCursor) self.updateSegmentationMask(self.ui.labelPathLineEdit.currentPath, self.info["labels"]) qt.QApplication.restoreOverrideCursor() except: qt.QApplication.restoreOverrideCursor() slicer.util.errorDisplay("Failed to import label", detailedText=traceback.format_exc()) def onSaveLabel(self): start = time.time() labelmapVolumeNode = None result = None self.onClearScribbles() if self.current_sample.get("session"): if not self.onUploadImage(init_sample=False): return try: qt.QApplication.setOverrideCursor(qt.Qt.WaitCursor) segmentationNode = self._segmentNode labelmapVolumeNode = slicer.mrmlScene.AddNewNodeByClass("vtkMRMLLabelMapVolumeNode") slicer.modules.segmentations.logic().ExportVisibleSegmentsToLabelmapNode( segmentationNode, labelmapVolumeNode, self._volumeNode ) segmentation = 
segmentationNode.GetSegmentation() totalSegments = segmentation.GetNumberOfSegments() segmentIds = [segmentation.GetNthSegmentID(i) for i in range(totalSegments)] label_info = [] for idx, segmentId in enumerate(segmentIds): segment = segmentation.GetSegment(segmentId) if segment.GetName() in ["foreground_scribbles", "background_scribbles"]: logging.info(f"Removing segment {segmentId}: {segment.GetName()}") segmentationNode.RemoveSegment(segmentId) continue label_info.append({"name": segment.GetName(), "idx": idx + 1}) # label_info.append({"color": segment.GetColor()}) label_in = tempfile.NamedTemporaryFile(suffix=self.file_ext, dir=self.tmpdir).name self.reportProgress(5) if ( slicer.util.settingsValue("MONAILabel/allowOverlappingSegments", True, converter=slicer.util.toBool) and slicer.util.settingsValue("MONAILabel/fileExtension", self.file_ext) == ".seg.nrrd" ): slicer.util.saveNode(segmentationNode, label_in) else: slicer.util.saveNode(labelmapVolumeNode, label_in) self.reportProgress(30) self.updateServerSettings() result = self.logic.save_label(self.current_sample["id"], label_in, {"label_info": label_info}) self.fetchInfo() if slicer.util.settingsValue("MONAILabel/autoUpdateModel", True, converter=slicer.util.toBool): try: if self.isTrainingRunning(check_only=True): self.logic.train_stop() except: logging.info("Failed to stop training; or already stopped") self.onTraining() except: slicer.util.errorDisplay("Failed to save Label to MONAI Label Server", detailedText=traceback.format_exc()) finally: qt.QApplication.restoreOverrideCursor() self.reportProgress(100) if labelmapVolumeNode: slicer.mrmlScene.RemoveNode(labelmapVolumeNode) if result: slicer.util.infoDisplay( "Label-Mask saved into MONAI Label Server\t\t", detailedText=json.dumps(result, indent=2) ) if slicer.util.settingsValue("MONAILabel/autoFetchNextSample", False, converter=slicer.util.toBool): slicer.mrmlScene.Clear(0) self.onNextSampleButton() logging.info("Time consumed by save label: {0:3.1f}".format(time.time() - start)) def getSessionId(self): session_id = None if self.current_sample.get("session", False): session_id = self.current_sample.get("session_id") if not session_id or not self.logic.get_session(session_id): self.onUploadImage(init_sample=False, session=True) session_id = self.current_sample["session_id"] return session_id def onClickSegmentation(self): if not self.current_sample: return start = time.time() result_file = None try: qt.QApplication.setOverrideCursor(qt.Qt.WaitCursor) self.updateServerSettings() model = self.ui.segmentationModelSelector.currentText image_file = self.current_sample["id"] params = self.getParamsFromConfig("infer", model) result_file, params = self.logic.infer(model, image_file, params, session_id=self.getSessionId()) print(f"Result Params for Segmentation: {params}") labels = ( params.get("label_names") if params and params.get("label_names") else self.models[model].get("labels") ) if labels and isinstance(labels, dict): labels = [k for k, _ in sorted(labels.items(), key=lambda item: item[1])] self.updateSegmentationMask(result_file, labels) except: slicer.util.errorDisplay( "Failed to run inference in MONAI Label Server", detailedText=traceback.format_exc() ) finally: qt.QApplication.restoreOverrideCursor() if result_file and os.path.exists(result_file): os.unlink(result_file) self.updateGUIFromParameterNode() logging.info("Time consumed by segmentation: {0:3.1f}".format(time.time() - start)) def onUpdateDeepgrow(self): self.onClickDeepgrow(None) def onClickDeepgrow(self, 
current_point, skip_infer=False): model = self.ui.deepgrowModelSelector.currentText if not model: slicer.util.warningDisplay("Please select a deepgrow model") return _, segment = self.currentSegment() if not segment: slicer.util.warningDisplay("Please add the required label to run deepgrow") return foreground_all = self.getFiducialPointsXYZ(self.dgPositiveFiducialNode, "foreground") background_all = self.getFiducialPointsXYZ(self.dgNegativeFiducialNode, "background") segment.SetTag("MONAILabel.ForegroundPoints", json.dumps(foreground_all)) segment.SetTag("MONAILabel.BackgroundPoints", json.dumps(background_all)) if skip_infer: return # use model info "deepgrow" to determine deepgrow_3d = False if self.models[model].get("dimension", 3) == 2 else True start = time.time() label = segment.GetName() operationDescription = "Run Deepgrow for segment: {}; model: {}; 3d {}".format(label, model, deepgrow_3d) logging.debug(operationDescription) if not current_point: if not foreground_all and not deepgrow_3d: slicer.util.warningDisplay(operationDescription + " - points not added") return current_point = foreground_all[-1] if foreground_all else background_all[-1] if background_all else None try: qt.QApplication.setOverrideCursor(qt.Qt.WaitCursor) sliceIndex = None if self.multi_label: params = {} segmentation = self._segmentNode.GetSegmentation() for name in self.info.get("labels", []): points = [] segmentId = segmentation.GetSegmentIdBySegmentName(name) segment = segmentation.GetSegment(segmentId) if segmentId else None if segment: fPosStr = vtk.mutable("") segment.GetTag("MONAILabel.ForegroundPoints", fPosStr) pointset = str(fPosStr) print("{} => {} Fiducial points are: {}".format(segmentId, name, pointset)) if fPosStr is not None and len(pointset) > 0: points = json.loads(pointset) params[name] = points params["label"] = label labels = None else: sliceIndex = current_point[2] if current_point else None logging.debug("Slice Index: {}".format(sliceIndex)) if deepgrow_3d or not sliceIndex: foreground = foreground_all background = background_all else: foreground = [x for x in foreground_all if x[2] == sliceIndex] background = [x for x in background_all if x[2] == sliceIndex] logging.debug("Foreground: {}".format(foreground)) logging.debug("Background: {}".format(background)) logging.debug("Current point: {}".format(current_point)) params = { "label": label, "foreground": foreground, "background": background, } labels = [label] params["label"] = label params.update(self.getParamsFromConfig("infer", model)) print(f"Request Params for Deepgrow/Deepedit: {params}") image_file = self.current_sample["id"] result_file, params = self.logic.infer(model, image_file, params, session_id=self.getSessionId()) print(f"Result Params for Deepgrow/Deepedit: {params}") if labels is None: labels = ( params.get("label_names") if params and params.get("label_names") else self.models[model].get("labels") ) if labels and isinstance(labels, dict): labels = [k for k, _ in sorted(labels.items(), key=lambda item: item[1])] freeze = label if self.ui.freezeUpdateCheckBox.checked else None self.updateSegmentationMask(result_file, labels, None if deepgrow_3d else sliceIndex, freeze=freeze) except: logging.exception("Unknown Exception") slicer.util.errorDisplay(operationDescription + " - unexpected error.", detailedText=traceback.format_exc()) finally: qt.QApplication.restoreOverrideCursor() self.updateGUIFromParameterNode() logging.info("Time consumed by Deepgrow: {0:3.1f}".format(time.time() - start)) def createCursor(self, widget): 
return slicer.util.mainWindow().cursor def createSegmentNode(self): if self._volumeNode is None: return if self._segmentNode is None: name = "segmentation_" + self._volumeNode.GetName() self._segmentNode = slicer.mrmlScene.AddNewNodeByClass("vtkMRMLSegmentationNode") self._segmentNode.SetReferenceImageGeometryParameterFromVolumeNode(self._volumeNode) self._segmentNode.SetName(name) def getLabelColor(self, name): color = GenericAnatomyColors.get(name.lower()) return [c / 255.0 for c in color] if color else None def updateSegmentationMask(self, in_file, labels, sliceIndex=None, freeze=None): # TODO:: Add ROI Node (for Bounding Box if provided in the result) start = time.time() logging.debug("Update Segmentation Mask from: {}".format(in_file)) if in_file and not os.path.exists(in_file): return False segmentationNode = self._segmentNode segmentation = segmentationNode.GetSegmentation() if in_file is None: for label in labels: if not segmentation.GetSegmentIdBySegmentName(label): segmentation.AddEmptySegment(label, label, self.getLabelColor(label)) return True labels = [l for l in labels if l != "background"] print(f"Update Segmentation Mask using Labels: {labels}") # segmentId, segment = self.currentSegment() labelImage = sitk.ReadImage(in_file) labelmapVolumeNode = sitkUtils.PushVolumeToSlicer(labelImage, None, className="vtkMRMLLabelMapVolumeNode") existing_label_ids = {} for label in labels: id = segmentation.GetSegmentIdBySegmentName(label) if id: existing_label_ids[label] = id freeze = [freeze] if freeze and isinstance(freeze, str) else freeze print(f"Import only Freezed label: {freeze}") numberOfExistingSegments = segmentation.GetNumberOfSegments() slicer.modules.segmentations.logic().ImportLabelmapToSegmentationNode(labelmapVolumeNode, segmentationNode) slicer.mrmlScene.RemoveNode(labelmapVolumeNode) numberOfAddedSegments = segmentation.GetNumberOfSegments() - numberOfExistingSegments logging.debug("Adding {} segments".format(numberOfAddedSegments)) addedSegmentIds = [ segmentation.GetNthSegmentID(numberOfExistingSegments + i) for i in range(numberOfAddedSegments) ] for i, segmentId in enumerate(addedSegmentIds): segment = segmentation.GetSegment(segmentId) print("Setting new segmentation with id: {} => {}".format(segmentId, segment.GetName())) label = labels[i] if i < len(labels) else "unknown {}".format(i) # segment.SetName(label) # segment.SetColor(self.getLabelColor(label)) if freeze and label not in freeze: print(f"Discard label update for: {label}") elif label in existing_label_ids: segmentEditorWidget = slicer.modules.segmenteditor.widgetRepresentation().self().editor segmentEditorWidget.setSegmentationNode(segmentationNode) segmentEditorWidget.setMasterVolumeNode(self._volumeNode) segmentEditorWidget.setCurrentSegmentID(existing_label_ids[label]) effect = segmentEditorWidget.effectByName("Logical operators") labelmap = slicer.vtkOrientedImageData() segmentationNode.GetBinaryLabelmapRepresentation(segmentId, labelmap) if sliceIndex: selectedSegmentLabelmap = effect.selectedSegmentLabelmap() dims = selectedSegmentLabelmap.GetDimensions() count = 0 for x in range(dims[0]): for y in range(dims[1]): if selectedSegmentLabelmap.GetScalarComponentAsDouble(x, y, sliceIndex, 0): count = count + 1 selectedSegmentLabelmap.SetScalarComponentFromDouble(x, y, sliceIndex, 0, 0) logging.debug("Total Non Zero: {}".format(count)) # Clear the Slice if count: effect.modifySelectedSegmentByLabelmap( selectedSegmentLabelmap, slicer.qSlicerSegmentEditorAbstractEffect.ModificationModeSet ) # Union label 
map effect.modifySelectedSegmentByLabelmap( labelmap, slicer.qSlicerSegmentEditorAbstractEffect.ModificationModeAdd ) else: # adding bypass masking to not overwrite other layers, # needed for preserving scribbles during updates # help from: https://github.com/Slicer/Slicer/blob/master/Modules/Loadable/Segmentations/EditorEffects/Python/SegmentEditorLogicalEffect.py bypassMask = True effect.modifySelectedSegmentByLabelmap( labelmap, slicer.qSlicerSegmentEditorAbstractEffect.ModificationModeSet, bypassMask ) segmentationNode.RemoveSegment(segmentId) self.showSegmentationsIn3D() logging.info("Time consumed by updateSegmentationMask: {0:3.1f}".format(time.time() - start)) return True def showSegmentationsIn3D(self): # add closed surface representation if self._segmentNode: self._segmentNode.CreateClosedSurfaceRepresentation() view = slicer.app.layoutManager().threeDWidget(0).threeDView() view.resetFocalPoint() def updateServerUrlGUIFromSettings(self): # Save current server URL to the top of history settings = qt.QSettings() serverUrlHistory = settings.value("MONAILabel/serverUrlHistory") wasBlocked = self.ui.serverComboBox.blockSignals(True) self.ui.serverComboBox.clear() if serverUrlHistory: self.ui.serverComboBox.addItems(serverUrlHistory.split(";")) self.ui.serverComboBox.setCurrentText(settings.value("MONAILabel/serverUrl")) self.ui.serverComboBox.blockSignals(wasBlocked) def createFiducialNode(self, name, onMarkupNodeModified, color): displayNode = slicer.mrmlScene.AddNewNodeByClass("vtkMRMLMarkupsDisplayNode") displayNode.SetTextScale(0) displayNode.SetSelectedColor(color) fiducialNode = slicer.mrmlScene.AddNewNodeByClass("vtkMRMLMarkupsFiducialNode") fiducialNode.SetName(name) fiducialNode.SetAndObserveDisplayNodeID(displayNode.GetID()) fiducialNodeObservers = [] self.addFiducialNodeObserver(fiducialNode, onMarkupNodeModified) return fiducialNode, fiducialNodeObservers def removeFiducialNodeObservers(self, fiducialNode, fiducialNodeObservers): if fiducialNode and fiducialNodeObservers: for observer in fiducialNodeObservers: fiducialNode.RemoveObserver(observer) def addFiducialNodeObserver(self, fiducialNode, onMarkupNodeModified): fiducialNodeObservers = [] if fiducialNode: eventIds = [slicer.vtkMRMLMarkupsNode.PointPositionDefinedEvent] for eventId in eventIds: fiducialNodeObservers.append(fiducialNode.AddObserver(eventId, onMarkupNodeModified)) return fiducialNodeObservers def scribblesLayersPresent(self): scribbles_exist = False if self._segmentNode is not None: segmentationNode = self._segmentNode segmentation = segmentationNode.GetSegmentation() numSegments = segmentation.GetNumberOfSegments() segmentIds = [segmentation.GetNthSegmentID(i) for i in range(numSegments)] scribbles_exist = sum([int("scribbles" in sid) for sid in segmentIds]) > 0 return scribbles_exist def onStartScribbling(self): if not self._segmentNode: return logging.debug("Scribbles start event") if (not self.scribblesLayersPresent()) and (self._scribblesEditorWidget is None): # add background, layer index = -2 [2], color = red self._segmentNode.GetSegmentation().AddEmptySegment( "background_scribbles", "background_scribbles", [1.0, 0.0, 0.0] ) # add foreground, layer index = -1 [3], color = green self._segmentNode.GetSegmentation().AddEmptySegment( "foreground_scribbles", "foreground_scribbles", [0.0, 1.0, 0.0] ) # change segmentation display properties to "see through" the scribbles # further explanation at: # https://apidocs.slicer.org/master/classvtkMRMLSegmentationDisplayNode.html segmentationDisplayNode = 
self._segmentNode.GetDisplayNode() # background opacity = 0.2 segmentationDisplayNode.SetSegmentOpacity2DFill("background_scribbles", opacity) segmentationDisplayNode.SetSegmentOpacity2DOutline("background_scribbles", opacity) # foreground segmentationDisplayNode.SetSegmentOpacity2DFill("foreground_scribbles", opacity) segmentationDisplayNode.SetSegmentOpacity2DOutline("foreground_scribbles", opacity) # create segmentEditorWidget to access "Paint" and "Erase" segmentation tools # these will be used to draw scribbles self._scribblesEditorWidget = slicer.qMRMLSegmentEditorWidget() self._scribblesEditorWidget.setMRMLScene(slicer.mrmlScene) segmentEditorNode = slicer.vtkMRMLSegmentEditorNode() # adding new scribbles can overwrite a new one-hot vector, hence erase any existing # labels - this is not a desired behaviour hence we swith to overlay mode that enables drawing # scribbles without changing existing labels. Further explanation at: # https://discourse.slicer.org/t/how-can-i-set-masking-settings-on-a-segment-editor-effect-in-python/4406/7 segmentEditorNode.SetOverwriteMode(slicer.vtkMRMLSegmentEditorNode.OverwriteNone) # add all nodes to the widget slicer.mrmlScene.AddNode(segmentEditorNode) self._scribblesEditorWidget.setMRMLSegmentEditorNode(segmentEditorNode) self._scribblesEditorWidget.setSegmentationNode(self._segmentNode) self._scribblesEditorWidget.setMasterVolumeNode(self._volumeNode) def onUpdateScribbles(self): logging.info("Scribbles update event") scribblesMethod = self.ui.scribblesMethodSelector.currentText scribbles_in = None result_file = None try: qt.QApplication.setOverrideCursor(qt.Qt.WaitCursor) # get scribbles + label segmentationNode = self._segmentNode labelmapVolumeNode = slicer.mrmlScene.AddNewNodeByClass("vtkMRMLLabelMapVolumeNode") slicer.modules.segmentations.logic().ExportVisibleSegmentsToLabelmapNode( segmentationNode, labelmapVolumeNode, self._volumeNode ) scribbles_in = tempfile.NamedTemporaryFile(suffix=self.file_ext, dir=self.tmpdir).name self.reportProgress(5) # save scribbles + label to file slicer.util.saveNode(labelmapVolumeNode, scribbles_in) self.reportProgress(30) self.updateServerSettings() self.reportProgress(60) # try to first fetch vtkMRMLAnnotationROINode roiNode = slicer.mrmlScene.GetFirstNodeByClass("vtkMRMLAnnotationROINode") if roiNode == None: # if vtkMRMLAnnotationROINode not present, then check for vtkMRMLMarkupsROINode node roiNode = slicer.mrmlScene.GetFirstNodeByClass("vtkMRMLMarkupsROINode") # if roi node found, then try to get roi selected_roi = self.getROIPointsXYZ(roiNode) # send scribbles + label to server along with selected scribbles method params = self.getParamsFromConfig("infer", scribblesMethod) params.update({"roi": selected_roi}) image_file = self.current_sample["id"] result_file, params = self.logic.infer( scribblesMethod, image_file, params, scribbles_in, session_id=self.getSessionId() ) # display result from server self.reportProgress(90) _, segment = self.currentSegment() label = segment.GetName() self.updateSegmentationMask(result_file, [label]) except: slicer.util.errorDisplay( "Failed to post process label on MONAI Label Server using {}".format(scribblesMethod), detailedText=traceback.format_exc(), ) finally: qt.QApplication.restoreOverrideCursor() self.reportProgress(100) # clear all temporary files if scribbles_in and os.path.exists(scribbles_in): os.unlink(scribbles_in) if result_file and os.path.exists(result_file): os.unlink(result_file) def getROIPointsXYZ(self, roiNode): if roiNode == None: return [] v = 
self._volumeNode RasToIjkMatrix = vtk.vtkMatrix4x4() v.GetRASToIJKMatrix(RasToIjkMatrix) roi_points_ras = [0.0] * 6 if roiNode.__class__.__name__ == "vtkMRMLMarkupsROINode": # for vtkMRMLMarkupsROINode print(roiNode.__class__.__name__) center = [0] * 3 roiNode.GetCenter(center) roi_points_ras = [(x - s / 2, x + s / 2) for x, s in zip(center, roiNode.GetSize())] roi_points_ras = [item for sublist in roi_points_ras for item in sublist] elif roiNode.__class__.__name__ == "vtkMRMLAnnotationROINode": # for vtkMRMLAnnotationROINode (old method) print(roiNode.__class__.__name__) roiNode.GetBounds(roi_points_ras) else: # if none found then best to return empty list return [] min_points_ras = [roi_points_ras[0], roi_points_ras[2], roi_points_ras[4], 1.0] max_points_ras = [roi_points_ras[0 + 1], roi_points_ras[2 + 1], roi_points_ras[4 + 1], 1.0] min_points_ijk = RasToIjkMatrix.MultiplyDoublePoint(min_points_ras) max_points_ijk = RasToIjkMatrix.MultiplyDoublePoint(max_points_ras) min_points_ijk = [round(i) for i in min_points_ijk] max_points_ijk = [round(i) for i in max_points_ijk] roi_points_ijk = [val for pair in zip(min_points_ijk[0:3], max_points_ijk[0:3]) for val in pair] logging.debug("RAS: {}; IJK: {}".format(roi_points_ras, roi_points_ijk)) # print("RAS: {}; IJK: {}".format(roi_points_ras, roi_points_ijk)) return roi_points_ijk def onClearScribblesSegmentNodes(self): # more explanation on this at: # https://discourse.slicer.org/t/how-to-clear-segmentation/7433/4 # clear "scribbles" segment before saving the label if not self._segmentNode: return segmentation = self._segmentNode num_segments = segmentation.GetSegmentation().GetNumberOfSegments() for i in range(num_segments): segmentId = segmentation.GetSegmentation().GetNthSegmentID(i) if "scribbles" in segmentId: logging.info("clearning {}".format(segmentId)) labelMapRep = slicer.vtkOrientedImageData() segmentation.GetBinaryLabelmapRepresentation(segmentId, labelMapRep) vtkSegmentationCore.vtkOrientedImageDataResample.FillImage(labelMapRep, 0, labelMapRep.GetExtent()) slicer.vtkSlicerSegmentationsModuleLogic.SetBinaryLabelmapToSegment( labelMapRep, segmentation, segmentId, slicer.vtkSlicerSegmentationsModuleLogic.MODE_REPLACE ) def onClearScribbles(self): # reset scribbles mode self.scribblesMode = None # clear scribbles editor widget if self._scribblesEditorWidget: widget = self._scribblesEditorWidget del widget self._scribblesEditorWidget = None # remove "scribbles" segments from label self.onClearScribblesSegmentNodes() # reset UI elements associated with scribbles self.ui.scribblesCollapsibleButton.collapsed = True self.ui.paintScribblesButton.setChecked(False) self.ui.eraseScribblesButton.setChecked(False) self.ui.scribblesLabelSelector.setCurrentIndex(0) def checkAndInitialiseScribbles(self): if not self._segmentNode: return if self._scribblesEditorWidget is None: self.onStartScribbling() if self.scribblesMode is None: self.changeScribblesMode(tool="Paint", layer="foreground_scribbles") self.updateScribToolLayerFromMode() def updateScribToolLayerFromMode(self): if not self._segmentNode: return logging.info("Scribbles mode {} ".format(self.scribblesMode)) self.checkAndInitialiseScribbles() # update tool/layer select for scribblesEditorWidget tool, layer = self.getToolAndLayerFromScribblesMode() if self._scribblesEditorWidget: self._scribblesEditorWidget.setActiveEffectByName(tool) self._scribblesEditorWidget.setCurrentSegmentID(layer) # update brush type from checkbox if tool in ("Paint", "Erase"): is3dbrush = 
self.ui.brush3dCheckbox.checkState() self.on3dBrushCheckbox(state=is3dbrush) # update brush size from slider brushSize = self.ui.brushSizeSlider.value self.updateBrushSize(value=brushSize) def getToolAndLayerFromScribblesMode(self): if self.scribblesMode is not None: return self.scribblesMode.split("+") else: # default modes return "Paint", "foreground_scribbles" def changeScribblesMode(self, tool=None, layer=None): ctool, clayer = self.getToolAndLayerFromScribblesMode() ctool = tool if tool != None else ctool clayer = layer if layer != None else clayer self.scribblesMode = "+".join([ctool, clayer]) def onPaintScribbles(self): if not self._segmentNode: return if self.ui.eraseScribblesButton.checked: self.ui.eraseScribblesButton.setChecked(False) self.changeScribblesMode(tool="Paint" if self.ui.paintScribblesButton.checked else "None") self.updateScribToolLayerFromMode() def onEraseScribbles(self): if not self._segmentNode: return if self.ui.paintScribblesButton.checked: self.ui.paintScribblesButton.setChecked(False) self.changeScribblesMode(tool="Erase" if self.ui.eraseScribblesButton.checked else "None") self.updateScribToolLayerFromMode() def onSelectScribblesLabel(self): if not self._segmentNode: return index = self.ui.scribblesLabelSelector.currentIndex index = 0 if index < 0 else index selected = self.ui.scribblesLabelSelector.itemText(index) layer = "foreground_scribbles" if selected == "Foreground" else "background_scribbles" self.changeScribblesMode(layer=layer) self.updateScribToolLayerFromMode() def on3dBrushCheckbox(self, state): logging.info("3D brush update {}".format(state)) self.checkAndInitialiseScribbles() effect = self._scribblesEditorWidget.activeEffect() # enable scribbles in 3d using a sphere brush effect.setParameter("BrushSphere", state) def updateBrushSize(self, value): logging.info("brush size update {}".format(value)) if self.ui.paintScribblesButton.checked or self.ui.eraseScribblesButton.checked: self.checkAndInitialiseScribbles() effect = self._scribblesEditorWidget.activeEffect() effect.setParameter("BrushAbsoluteDiameter", value) class MONAILabelLogic(ScriptedLoadableModuleLogic): def __init__(self, tmpdir=None, server_url=None, progress_callback=None, client_id=None): ScriptedLoadableModuleLogic.__init__(self) self.server_url = server_url self.tmpdir = slicer.util.tempDirectory("slicer-monai-label") if tmpdir is None else tmpdir self.client_id = client_id self.volumeToSessions = dict() self.progress_callback = progress_callback def setDefaultParameters(self, parameterNode): if not parameterNode.GetParameter("SegmentationModel"): parameterNode.SetParameter("SegmentationModel", "") if not parameterNode.GetParameter("DeepgrowModel"): parameterNode.SetParameter("DeepgrowModel", "") if not parameterNode.GetParameter("ScribblesMethod"): parameterNode.SetParameter("ScribblesMethod", "") def __del__(self): shutil.rmtree(self.tmpdir, ignore_errors=True) def setServer(self, server_url=None): self.server_url = server_url if server_url else "http://127.0.0.1:8000" def setClientId(self, client_id): self.client_id = client_id if client_id else "user-xyz" def setProgressCallback(self, progress_callback=None): self.progress_callback = progress_callback def reportProgress(self, progress): if self.progress_callback: self.progress_callback(progress) def info(self): return MONAILabelClient(self.server_url, self.tmpdir, self.client_id).info() def next_sample(self, strategy, params={}): return MONAILabelClient(self.server_url, self.tmpdir, self.client_id).next_sample(strategy, 
params) def create_session(self, image_in): return MONAILabelClient(self.server_url, self.tmpdir, self.client_id).create_session(image_in) def get_session(self, session_id): return MONAILabelClient(self.server_url, self.tmpdir, self.client_id).get_session(session_id) def remove_session(self, session_id): return MONAILabelClient(self.server_url, self.tmpdir, self.client_id).remove_session(session_id) def upload_image(self, image_in, image_id=None): return MONAILabelClient(self.server_url, self.tmpdir, self.client_id).upload_image(image_in, image_id) def save_label(self, image_in, label_in, params): return MONAILabelClient(self.server_url, self.tmpdir, self.client_id).save_label( image_in, label_in, params=params ) def infer(self, model, image_in, params={}, label_in=None, file=None, session_id=None): logging.debug("Preparing input data for segmentation") self.reportProgress(0) client = MONAILabelClient(self.server_url, self.tmpdir, self.client_id) result_file, params = client.infer(model, image_in, params, label_in, file, session_id) logging.debug(f"Image Response: {result_file}") logging.debug(f"JSON Response: {params}") self.reportProgress(100) return result_file, params def train_start(self, model=None, params={}): return MONAILabelClient(self.server_url, self.tmpdir, self.client_id).train_start(model, params) def train_status(self, check_if_running): return MONAILabelClient(self.server_url, self.tmpdir, self.client_id).train_status(check_if_running) def train_stop(self): return MONAILabelClient(self.server_url, self.tmpdir, self.client_id).train_stop() class MONAILabelTest(ScriptedLoadableModuleTest): def setUp(self): slicer.mrmlScene.Clear() def runTest(self): self.setUp() self.test_MONAILabel1() def test_MONAILabel1(self): self.delayDisplay("Test passed")
[ [ [ 589, 593 ], [ 31980, 31984 ] ], [ [ 601, 605 ], [ 40055, 40059 ], [ 58458, 58462 ], [ 61384, 61388 ], [ 61466, 61470 ], [ 63114, 63118 ] ], [ [ 613, 620 ], [ 37038, 37045 ], [ 37263, 37270 ], [ 38530, 38537 ], [ 38653, 38660 ], [ 39276, 39283 ], [ 39874, 39881 ], [ 40251, 40258 ], [ 40624, 40631 ], [ 43929, 43936 ], [ 44625, 44632 ], [ 44651, 44658 ], [ 46479, 46486 ], [ 46506, 46513 ], [ 47584, 47591 ], [ 47653, 47660 ], [ 48864, 48871 ], [ 49457, 49464 ], [ 49824, 49831 ], [ 50192, 50199 ], [ 50801, 50808 ], [ 53562, 53569 ], [ 56537, 56544 ], [ 57877, 57884 ], [ 58724, 58731 ], [ 60576, 60583 ], [ 61856, 61863 ], [ 63355, 63362 ], [ 63759, 63766 ], [ 63826, 63833 ], [ 63893, 63900 ], [ 65170, 65177 ], [ 65450, 65457 ], [ 66390, 66397 ], [ 67891, 67898 ], [ 69786, 69793 ], [ 70982, 70989 ], [ 73734, 73741 ], [ 76290, 76297 ], [ 80466, 80473 ], [ 81203, 81210 ], [ 82858, 82865 ], [ 85294, 85301 ], [ 85598, 85605 ], [ 88501, 88508 ], [ 88773, 88780 ], [ 88829, 88836 ] ], [ [ 628, 630 ], [ 41253, 41255 ], [ 41266, 41268 ], [ 41332, 41334 ], [ 49784, 49786 ], [ 54859, 54861 ], [ 60457, 60459 ], [ 60502, 60504 ], [ 66484, 66486 ], [ 78758, 78760 ], [ 78804, 78806 ], [ 78860, 78862 ], [ 78905, 78907 ] ], [ [ 638, 644 ], [ 16216, 16222 ], [ 86832, 86838 ] ], [ [ 652, 660 ], [ 53356, 53364 ], [ 56866, 56874 ], [ 76913, 76921 ] ], [ [ 668, 672 ], [ 42904, 42908 ], [ 44711, 44715 ], [ 45346, 45350 ], [ 45873, 45877 ], [ 46564, 46568 ], [ 46633, 46637 ], [ 47716, 47720 ], [ 48433, 48437 ], [ 50862, 50866 ], [ 53484, 53488 ], [ 53632, 53636 ], [ 55441, 55445 ], [ 58784, 58788 ], [ 59316, 59320 ], [ 60638, 60642 ], [ 61687, 61691 ], [ 65508, 65512 ], [ 66370, 66374 ], [ 71054, 71058 ] ], [ [ 680, 689 ], [ 43223, 43232 ], [ 43555, 43564 ], [ 46064, 46073 ], [ 47093, 47102 ], [ 50644, 50653 ], [ 54569, 54578 ], [ 54720, 54729 ], [ 55373, 55382 ], [ 58083, 58092 ], [ 60320, 60329 ], [ 65306, 65315 ], [ 78541, 78550 ] ], [ [ 714, 725 ], [ 8379, 8390 ], [ 8417, 8428 ], [ 8453, 8464 ] ], [ [ 751, 761 ], [ 50153, 50163 ] ], [ [ 770, 773 ], [ 7530, 7533 ], [ 2163, 2166 ], [ 3933, 3936 ], [ 4509, 4512 ], [ 5063, 5066 ], [ 5651, 5654 ], [ 6199, 6202 ], [ 6794, 6797 ], [ 7602, 7605 ] ], [ [ 781, 783 ], [ 2101, 2103 ], [ 2260, 2262 ], [ 2306, 2308 ], [ 2457, 2459 ], [ 2521, 2523 ], [ 2714, 2716 ], [ 2784, 2786 ], [ 3081, 3083 ], [ 3146, 3148 ], [ 3429, 3431 ], [ 3504, 3506 ], [ 3998, 4000 ], [ 4070, 4072 ], [ 4157, 4159 ], [ 4574, 4576 ], [ 4646, 4648 ], [ 4729, 4731 ], [ 5124, 5126 ], [ 5196, 5198 ], [ 5278, 5280 ], [ 5711, 5713 ], [ 5783, 5785 ], [ 5863, 5865 ], [ 6257, 6259 ], [ 6329, 6331 ], [ 6491, 6493 ], [ 6853, 6855 ], [ 6925, 6927 ], [ 15776, 15778 ], [ 15920, 15922 ], [ 16075, 16077 ], [ 26053, 26055 ], [ 31157, 31159 ], [ 33354, 33356 ], [ 33433, 33435 ], [ 33529, 33531 ], [ 33651, 33653 ], [ 33701, 33703 ], [ 33822, 33824 ], [ 33948, 33950 ], [ 34249, 34251 ], [ 34443, 34445 ], [ 36061, 36063 ], [ 41377, 41379 ], [ 41411, 41413 ], [ 41947, 41949 ], [ 44544, 44546 ], [ 45405, 45407 ], [ 45439, 45441 ], [ 46130, 46132 ], [ 46858, 46860 ], [ 46892, 46894 ], [ 47146, 47148 ], [ 48470, 48472 ], [ 48504, 48506 ], [ 50710, 50712 ], [ 52291, 52293 ], [ 53282, 53284 ], [ 53316, 53318 ], [ 54137, 54139 ], [ 54313, 54315 ], [ 55025, 55027 ], [ 55059, 55061 ], [ 55189, 55191 ], [ 55257, 55259 ], [ 55696, 55698 ], [ 55730, 55732 ], [ 58136, 58138 ], [ 59380, 59382 ], [ 59414, 59416 ], [ 60386, 60388 ], [ 62233, 62235 ], [ 62267, 62269 ], [ 65359, 65361 ], [ 71498, 71500 ], [ 76480, 76482 ], [ 76514, 76516 
], [ 78608, 78610 ] ], [ [ 791, 801 ], [ 50254, 50264 ] ], [ [ 809, 826 ], [ 67084, 67088 ] ], [ [ 834, 843 ], [ 67137, 67146 ] ], [ [ 851, 857 ], [ 1694, 1700 ], [ 1818, 1824 ], [ 1931, 1937 ], [ 7094, 7100 ], [ 7175, 7181 ], [ 7211, 7217 ], [ 7303, 7309 ], [ 9511, 9517 ], [ 9627, 9633 ], [ 9932, 9938 ], [ 10062, 10068 ], [ 10080, 10086 ], [ 10163, 10169 ], [ 10181, 10187 ], [ 10260, 10266 ], [ 10278, 10284 ], [ 10513, 10519 ], [ 11382, 11388 ], [ 11774, 11780 ], [ 15342, 15348 ], [ 15664, 15670 ], [ 15736, 15742 ], [ 15957, 15963 ], [ 17825, 17831 ], [ 18498, 18504 ], [ 20880, 20886 ], [ 21596, 21602 ], [ 21721, 21727 ], [ 25469, 25475 ], [ 25539, 25545 ], [ 28651, 28657 ], [ 41537, 41543 ], [ 43091, 43097 ], [ 43316, 43322 ], [ 44571, 44577 ], [ 44844, 44850 ], [ 45071, 45077 ], [ 45276, 45282 ], [ 45961, 45967 ], [ 46682, 46688 ], [ 47023, 47029 ], [ 48045, 48051 ], [ 48121, 48127 ], [ 48390, 48396 ], [ 48663, 48669 ], [ 48939, 48945 ], [ 49973, 49979 ], [ 50539, 50545 ], [ 51256, 51262 ], [ 51519, 51525 ], [ 51601, 51607 ], [ 51685, 51691 ], [ 51969, 51975 ], [ 52070, 52076 ], [ 52553, 52559 ], [ 53071, 53077 ], [ 53508, 53514 ], [ 54393, 54399 ], [ 54645, 54651 ], [ 54926, 54932 ], [ 55309, 55315 ], [ 55830, 55836 ], [ 55906, 55912 ], [ 57007, 57013 ], [ 57088, 57094 ], [ 57128, 57134 ], [ 57243, 57249 ], [ 57326, 57332 ], [ 57607, 57613 ], [ 57679, 57685 ], [ 57999, 58005 ], [ 58265, 58271 ], [ 58352, 58358 ], [ 58525, 58531 ], [ 58602, 58608 ], [ 58643, 58649 ], [ 60216, 60222 ], [ 60885, 60891 ], [ 61045, 61051 ], [ 61994, 62000 ], [ 65221, 65227 ], [ 65582, 65588 ], [ 65838, 65844 ], [ 67624, 67630 ], [ 67740, 67746 ], [ 68684, 68690 ], [ 69088, 69094 ], [ 70022, 70028 ], [ 70243, 70249 ], [ 70785, 70791 ], [ 71289, 71295 ], [ 72025, 72031 ], [ 72193, 72199 ], [ 72919, 72925 ], [ 75237, 75243 ], [ 75324, 75330 ], [ 75374, 75380 ], [ 75861, 75867 ], [ 75963, 75969 ], [ 76651, 76657 ], [ 76727, 76733 ], [ 77078, 77084 ], [ 77326, 77332 ], [ 77535, 77541 ], [ 78383, 78389 ], [ 81280, 81286 ], [ 81527, 81533 ], [ 81654, 81660 ], [ 86172, 86178 ], [ 89452, 89458 ] ], [ [ 865, 868 ], [ 16470, 16473 ], [ 18935, 18938 ], [ 19141, 19144 ], [ 37996, 37999 ], [ 38864, 38867 ], [ 39675, 39678 ], [ 39765, 39768 ], [ 62764, 62767 ], [ 79074, 79077 ] ], [ [ 876, 895 ], [ 81411, 81430 ] ], [ [ 922, 942 ], [ 66093, 66113 ] ], [ [ 944, 960 ], [ 87373, 87389 ], [ 87507, 87523 ], [ 87656, 87672 ], [ 87799, 87815 ], [ 87944, 87960 ], [ 88103, 88119 ], [ 88269, 88285 ], [ 88605, 88621 ], [ 89007, 89023 ], [ 89159, 89175 ], [ 89295, 89311 ] ], [ [ 1003, 1004 ], [ 1068, 1090 ], [ 7732, 7760 ], [ 85937, 85964 ], [ 89394, 89420 ], [ 1133, 1155 ], [ 7945, 7973 ], [ 9304, 9332 ], [ 86069, 86096 ] ], [ [ 1029, 1048 ], [ 7762, 7781 ], [ 8005, 8024 ] ], [ [ 1057, 1067 ] ], [ [ 2011, 2038 ], [ 7673, 7700 ] ], [ [ 7506, 7529 ], [ 1893, 1916 ] ], [ [ 7715, 7731 ] ], [ [ 85921, 85936 ], [ 10582, 10597 ] ], [ [ 89379, 89393 ] ] ]
'''
Problem 017

If the numbers 1 to 5 are written out in words: one, two, three, four, five,
then there are 3 + 3 + 5 + 4 + 4 = 19 letters used in total.

If all the numbers from 1 to 1000 (one thousand) inclusive were written out in
words, how many letters would be used?

NOTE: Do not count spaces or hyphens. For example, 342 (three hundred and
forty-two) contains 23 letters and 115 (one hundred and fifteen) contains
20 letters. The use of "and" when writing out numbers is in compliance with
British usage.

Solution: Copyright 2017 Dave Cuthbert, MIT License
'''

ones_names = ['zero', 'one', 'two', 'three', 'four', 'five', 'six', 'seven',
              'eight', 'nine', 'ten', 'eleven', 'twelve', 'thirteen',
              'fourteen', 'fifteen', 'sixteen', 'seventeen', 'eighteen',
              'nineteen']

tens_names = ['zero', 'ten', 'twenty', 'thirty', 'forty', 'fifty', 'sixty',
              'seventy', 'eighty', 'ninety']


def build_words(n):
    if n == 1000:
        return 'one' + 'thousand'
    elif n > 99:
        hundreds = ones_names[int(n / 100)] + 'hundred'
        n = n % 100
        if n == 0:
            return hundreds
        return hundreds + 'and' + build_words(n)
    elif n > 19:
        tens = tens_names[int(n / 10)]
        n = n % 10
        if n == 0:
            return tens
        return tens + ones_names[n]
    else:
        return ones_names[n]


def solve_problem():
    total_letters = 0
    for n in range(1, 1001):
        total_letters += len(build_words(n))
    return(total_letters)


if __name__ == "__main__":
    print(solve_problem())
[ [ [ 590, 600 ], [ 1083, 1093 ], [ 1376, 1386 ], [ 1415, 1425 ] ], [ [ 852, 862 ], [ 1268, 1278 ] ], [ [ 979, 990 ], [ 1221, 1232 ], [ 1536, 1547 ] ], [ [ 1440, 1453 ], [ 1619, 1632 ] ] ]
import _plotly_utils.basevalidators


class TypesrcValidator(_plotly_utils.basevalidators.SrcValidator):
    def __init__(
        self, plotly_name="typesrc", parent_name="scattergeo.marker.gradient", **kwargs
    ):
        super(TypesrcValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            edit_type=kwargs.pop("edit_type", "none"),
            role=kwargs.pop("role", "info"),
            **kwargs
        )


import _plotly_utils.basevalidators


class TypeValidator(_plotly_utils.basevalidators.EnumeratedValidator):
    def __init__(
        self, plotly_name="type", parent_name="scattergeo.marker.gradient", **kwargs
    ):
        super(TypeValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            array_ok=kwargs.pop("array_ok", True),
            edit_type=kwargs.pop("edit_type", "calc"),
            role=kwargs.pop("role", "style"),
            values=kwargs.pop("values", ["radial", "horizontal", "vertical", "none"]),
            **kwargs
        )


import _plotly_utils.basevalidators


class ColorsrcValidator(_plotly_utils.basevalidators.SrcValidator):
    def __init__(
        self, plotly_name="colorsrc", parent_name="scattergeo.marker.gradient", **kwargs
    ):
        super(ColorsrcValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            edit_type=kwargs.pop("edit_type", "none"),
            role=kwargs.pop("role", "info"),
            **kwargs
        )


import _plotly_utils.basevalidators


class ColorValidator(_plotly_utils.basevalidators.ColorValidator):
    def __init__(
        self, plotly_name="color", parent_name="scattergeo.marker.gradient", **kwargs
    ):
        super(ColorValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            array_ok=kwargs.pop("array_ok", True),
            edit_type=kwargs.pop("edit_type", "calc"),
            role=kwargs.pop("role", "style"),
            **kwargs
        )
[ [ [ 7, 35 ], [ 61, 74 ] ], [ [ 44, 60 ], [ 232, 248 ] ], [ [ 480, 508 ], [ 531, 544 ] ], [ [ 517, 530 ], [ 706, 719 ] ], [ [ 1090, 1118 ], [ 1145, 1158 ] ], [ [ 1127, 1144 ], [ 1317, 1334 ] ], [ [ 1566, 1594 ], [ 1618, 1631 ] ], [ [ 1603, 1617 ], [ 1789, 1803 ] ] ]
from dataclasses import dataclass, field
from datetime import datetime
from typing import Optional

from .cloud_provider import CloudProvider
from .instance import Instance
from .instance_status import InstanceStatus


@dataclass
class AwsInstance(Instance):
    """Extends Instance to add fields specific to the AWS compute sources."""

    type: str = field(default="co.yellowdog.platform.model.AwsInstance", init=False)

    id: Optional[str] = field(default=None, init=False)
    """The ID of this instance."""

    instanceLifecycle: Optional[str] = None
    """The AWS EC2 instance lifecycle value for this instance."""

    createdTime: Optional[datetime] = None
    """The date and time when this instance was first created."""

    sourceId: Optional[str] = None
    """The ID of the compute source from which this instance was provisioned."""

    imageId: Optional[str] = None
    """The machine image ID used for this instance."""

    instanceType: Optional[str] = None
    """The machine type of this instance."""

    provider: Optional[CloudProvider] = None
    """The cloud provider that supplies this instance."""

    region: Optional[str] = None
    """The region where this instance is provisioned."""

    status: Optional[InstanceStatus] = None
    """The status of this instance."""

    subregion: Optional[str] = None
    """The subregion where this instance is provisioned."""

    privateIpAddress: Optional[str] = None
    """The private IP address of this instance."""

    publicIpAddress: Optional[str] = None
    """The public IP address of this instance."""

    hostname: Optional[str] = None
    """The hostname of this instance."""
[ [ [ 24, 33 ], [ 220, 229 ] ], [ [ 35, 40 ], [ 353, 358 ], [ 446, 451 ] ], [ [ 62, 70 ], [ 649, 657 ] ], [ [ 90, 98 ], [ 430, 438 ], [ 536, 544 ], [ 640, 648 ], [ 746, 754 ], [ 861, 869 ], [ 955, 963 ], [ 1035, 1043 ], [ 1136, 1144 ], [ 1226, 1234 ], [ 1312, 1320 ], [ 1415, 1423 ], [ 1508, 1516 ], [ 1593, 1601 ] ], [ [ 128, 141 ], [ 1044, 1057 ] ], [ [ 164, 172 ], [ 248, 256 ] ], [ [ 202, 216 ], [ 1235, 1249 ] ], [ [ 236, 247 ] ] ]
"""Basic PDFs are provided here. Gauss, exponential... that can be used together with Functors to build larger models. """ # Copyright (c) 2021 zfit import contextlib import numpy as np import tensorflow as tf import zfit.z.numpy as znp from zfit import z from ..core.basepdf import BasePDF from ..core.space import ANY_LOWER, ANY_UPPER, Space from ..util import ztyping from ..util.exception import (AnalyticIntegralNotImplemented, BreakingAPIChangeError) from ..util.warnings import warn_advanced_feature class Exponential(BasePDF): _N_OBS = 1 def __init__(self, lam=None, obs: ztyping.ObsTypeInput = None, name: str = "Exponential", lambda_=None): """Exponential function exp(lambda * x). The function is normalized over a finite range and therefore a pdf. So the PDF is precisely defined as :math:`\\frac{ e^{\\lambda \\cdot x}}{ \\int_{lower}^{upper} e^{\\lambda \\cdot x} dx}` Args: lam: Accessed as parameter "lambda". obs: The :py:class:`~zfit.Space` the pdf is defined in. name: Name of the pdf. dtype: """ if lambda_ is not None: if lam is None: lam = lambda_ else: raise BreakingAPIChangeError("The 'lambda' parameter has been renamed from 'lambda_' to 'lam'.") params = {'lambda': lam} super().__init__(obs, name=name, params=params) self._calc_numerics_data_shift = lambda: z.constant(0.) if not self.space.has_limits: warn_advanced_feature("Exponential pdf relies on a shift of the input towards 0 to keep the numerical " f"stability high. The space {self.space} does not have limits set and no shift" f" will occure. To set it manually, set _numerics_data_shift to the expected" f" average values given to this function _in case you want things to be set_." f"If this sounds unfamiliar, regard this as an error and use a normalization range.", identifier='exp_shift') self._set_numerics_data_shift(self.space) def _unnormalized_pdf(self, x): lambda_ = self.params['lambda'] x = x.unstack_x() probs = znp.exp(lambda_ * (self._shift_x(x))) tf.debugging.assert_all_finite(probs, f"Exponential PDF {self} has non valid values. This is likely caused" f" by numerical problems: if the exponential is too steep, this will" f" yield NaNs or infs. Make sure that your lambda is small enough and/or" f" the initial space is in the same" f" region as your data (and norm_range, if explicitly set differently)." f" If this issue still persists, please oben an issue on Github:" f" https://github.com/zfit/zfit") return probs # Don't use exp! will overflow. def _shift_x(self, x): return x - self._calc_numerics_data_shift() @contextlib.contextmanager def _set_numerics_data_shift(self, limits): if limits: def calc_numerics_data_shift(): lower, upper = [], [] for limit in limits: low, up = limit.rect_limits lower.append(z.convert_to_tensor(low[:, 0])) upper.append(z.convert_to_tensor(up[:, 0])) lower = z.convert_to_tensor(lower) upper = z.convert_to_tensor(upper) lower_val = znp.min(lower, axis=0) upper_val = znp.max(upper, axis=0) return (upper_val + lower_val) / 2 old_value = self._calc_numerics_data_shift self._calc_numerics_data_shift = calc_numerics_data_shift yield self._calc_numerics_data_shift = old_value else: yield # All hooks are needed to set the right shift when "entering" the pdf. The norm range is taken where both are # available. 
No special need needs to be taken for sampling (it samples from the correct region, the limits, and # uses the predictions by the `unnormalized_prob` -> that is shifted correctly def _single_hook_integrate(self, limits, norm_range, x): with self._set_numerics_data_shift(norm_range): return super()._single_hook_integrate(limits, norm_range, x=x) def _single_hook_analytic_integrate(self, limits, norm_range): with self._set_numerics_data_shift(limits=norm_range): return super()._single_hook_analytic_integrate(limits, norm_range) def _single_hook_numeric_integrate(self, limits, norm_range): with self._set_numerics_data_shift(limits=norm_range): return super()._single_hook_numeric_integrate(limits, norm_range) def _single_hook_partial_integrate(self, x, limits, norm_range): with self._set_numerics_data_shift(limits=norm_range): return super()._single_hook_partial_integrate(x, limits, norm_range) def _single_hook_partial_analytic_integrate(self, x, limits, norm_range): with self._set_numerics_data_shift(limits=norm_range): return super()._single_hook_partial_analytic_integrate(x, limits, norm_range) def _single_hook_partial_numeric_integrate(self, x, limits, norm_range): with self._set_numerics_data_shift(limits=norm_range): return super()._single_hook_partial_numeric_integrate(x, limits, norm_range) # def _single_hook_normalization(self, limits): # with self._set_numerics_data_shift(limits=limits): # return super()._single_hook_normalization(limits) # # # TODO: remove component_norm_range? But needed for integral? # def _single_hook_unnormalized_pdf(self, x, name): # if component_norm_range.limits_are_false: # component_norm_range = self.space # if component_norm_range.limits_are_set: # with self._set_numerics_data_shift(limits=component_norm_range): # return super()._single_hook_unnormalized_pdf(x, name) # else: # return super()._single_hook_unnormalized_pdf(x, name) # def _single_hook_pdf(self, x, norm_range): with self._set_numerics_data_shift(limits=norm_range): return super()._single_hook_pdf(x, norm_range) # def _single_hook_log_pdf(self, x, norm_range): with self._set_numerics_data_shift(limits=norm_range): return super()._single_hook_log_pdf(x, norm_range) def _single_hook_sample(self, n, limits, x=None): with self._set_numerics_data_shift(limits=limits): return super()._single_hook_sample(n, limits, x) def _exp_integral_from_any_to_any(limits, params, model): lambda_ = params['lambda'] lower, upper = limits.rect_limits # if any(np.isinf([lower, upper])): # raise AnalyticIntegralNotImplemented integral = _exp_integral_func_shifting(lambd=lambda_, lower=lower, upper=upper, model=model) return integral[0] def _exp_integral_func_shifting(lambd, lower, upper, model): def raw_integral(x): return z.exp(lambd * (model._shift_x(x))) / lambd # needed due to overflow in exp otherwise lower_int = raw_integral(x=lower) upper_int = raw_integral(x=upper) integral = (upper_int - lower_int) return integral def exp_icdf(x, params, model): lambd = params['lambda'] x = z.unstack_x(x) x = model._shift_x(x) return znp.log(lambd * x) / lambd # Exponential.register_inverse_analytic_integral(exp_icdf) # TODO: register icdf for exponential # TODO: cleanup, make cdf registrable _and_ inverse integral, but real limits = Space(axes=0, limits=(ANY_LOWER, ANY_UPPER)) Exponential.register_analytic_integral(func=_exp_integral_from_any_to_any, limits=limits)
[ [ [ 159, 169 ], [ 3293, 3303 ] ], [ [ 178, 189 ] ], [ [ 197, 213 ], [ 2415, 2417 ] ], [ [ 222, 241 ], [ 2369, 2372 ], [ 7784, 7787 ], [ 3812, 3815 ], [ 3863, 3866 ] ], [ [ 259, 260 ], [ 7732, 7733 ], [ 1513, 1514 ], [ 3586, 3587 ], [ 3651, 3652 ], [ 3706, 3707 ], [ 3757, 3758 ], [ 7439, 7440 ] ], [ [ 289, 296 ], [ 563, 570 ] ], [ [ 322, 331 ], [ 8014, 8023 ] ], [ [ 333, 342 ], [ 8025, 8034 ] ], [ [ 344, 349 ], [ 7992, 7997 ] ], [ [ 369, 376 ], [ 627, 634 ] ], [ [ 407, 437 ] ], [ [ 469, 491 ], [ 1283, 1305 ] ], [ [ 521, 542 ], [ 1579, 1600 ] ], [ [ 551, 562 ], [ 8037, 8048 ] ], [ [ 7005, 7034 ], [ 8081, 8110 ] ], [ [ 7342, 7369 ], [ 7231, 7258 ] ], [ [ 7667, 7675 ] ], [ [ 7983, 7989 ], [ 8119, 8125 ] ] ]
import os
import math

''' Metric goal is reached '''

# System settings
dev_mode = False
grid_data_folder = os.path.join(os.getcwd(), 'raw_data_generation', 'input')
raw_data_folder = os.path.join(os.getcwd(), 'raw_data')
datasets_folder = os.path.join(os.getcwd(), 'datasets')
test_data_folder = os.path.join(os.getcwd(), 'test')
models_folder = os.path.join(os.getcwd(), 'models')
local_machine_tz = 'Europe/Berlin'  # timezone; it's important for PowerFactory

# Deep learning settings
learning_config = {
    "dataset": "PV_noPV_7day_20k",
    "RNN model settings": [1, 2, 6, 2],  # number of input features, number of output features, number of features in hidden state, number of layers
    "number of epochs": 100,
    "learning rate": 1*10**-6,
    "activation function": 'tanh',  # relu, tanh
    "mini batch size": 60,
    "optimizer": 'Adam',  # Adam, SGD
    "k folds": 5,  # choose 1 to not do crossval
    "cross_validation": True,
    "early stopping": False,
    "LR adjustment": 'LR controlled',  # None, 'warm up', 'LR controlled'
    "percentage of epochs for warm up": 10,  # warm up not performed if percentage of epochs for warm up * epochs > epochs
    "train test split": 0.2,  # if int, used as number of testing examples; if float, used as share of data
    "baseline": False,
    "metrics": ['accuracy', 'precision_macro', 'recall_macro', 'f1_macro'],
    "cross_val_metrics": ['fit_time', 'test_accuracy', 'test_precision_macro', 'test_recall_macro', 'test_f1_macro'],
    "plot samples": True,
    "classifier": "RNN"  # RNN
}

#########################################################################
###    only change if new dataset or raw data should be created      ###
#########################################################################

# Dataset settings
raw_data_set_name = 'PV_noPV'  # 'malfunctions_in_LV_grid_dataset', 'PV_noPV', dummy
dataset_available = False  # set to False to recreate instances from raw data
raw_data_available = True  # set to False to generate raw data using the simulation; leave True if DIgSILENT PowerFactory is not available
add_data = True  # raw_data_available = False has to be set for this! set add_data = True to add more data to raw data
add_noise = False
accuracy = 0.01  # accuracy according to the accuracy class (Genauigkeitsklasse) of the SmartMeter (1 = 1%)
sample_length = 7 * 96  # 96 datapoints per day
smartmeter_ratedvoltage_range = [400, 415]
smartmeter_voltage_range = [363, 457]
number_of_samples = 20000
share_of_positive_samples = 0.5  # should be 0.5! only choose values that yield real numbers as inverse, i.e. 0.2, 0.25, 0.5 > otherwise the number of samples is corrupted
number_of_grids = len([i for i in os.listdir(grid_data_folder) if os.path.isdir(os.path.join(grid_data_folder, i))])
float_decimal = 5  # decimals in dataset

# Powerfactory settings
user = 'FellnerD'
system_language = 0  # choose 0 for English, 1 for German, according to the language of the PowerFactory installed on the system
parallel_computing = True
cores = 12  # cores to be used for parallel computing (when 64 available use 12 - 24)
reduce_result_file_size = True  # save results as integers to save memory in csv
just_voltages = True  # if False also P and Q results given

# Simulation settings
sim_length = 365  # simulation length in days (has to be equal to or bigger than the sample length)
if sim_length < sample_length/96:
    print('Choose different simulation length or sample length (sim_length >= sample_length)')

if raw_data_set_name == 'PV_noPV':
    positive_samples_per_simrun = 5  # data from how many terminals are there in the grid minimally > determines how many yearly simulations have to be run and used for dataset creation
    simruns = math.ceil((number_of_samples * share_of_positive_samples) / (positive_samples_per_simrun * number_of_grids) / (sim_length * 96/sample_length))
elif raw_data_set_name == 'malfunctions_in_LV_grid_dataset':
    simruns = math.ceil((number_of_samples * share_of_positive_samples) / (number_of_grids) / int(
        (sim_length * 96 / sample_length) - 1))
elif raw_data_set_name == 'dummy':
    terms_per_simrun = 5  # data from how many terminals are there in the grid minimally > determines how many yearly simulations have to be run and used for dataset creation
    simruns = math.ceil((number_of_samples * share_of_positive_samples) / (terms_per_simrun * number_of_grids) / (sim_length * 96/sample_length))
else:
    simruns = 10  # number of datasets produced and also used per grid (location of malfunction/PVs... is varied)

step_size = 15  # simulation step size in minutes
percentage = 25  # percentage of busses with active PVs (PV proliferation)
control_curve_choice = 0  # for all PVs: choose control curve for 0 = cos(phi)(P), 1 = Q(P), 2 = broken Q(P) (flat curve)
broken_control_curve_choice = 3  # for broken PV: choose control curve for 0 = cos(phi)(P), 1 = Q(P), 2 = broken Q(P) (flat curve), 3 = wrong Q(P) (inverted curve)
number_of_broken_devices = 1  # define number of devices to experience malfunctions during simulation
load_scaling = 100  # general load scaling for all loads in simulation (does not apply to setup)
generation_scaling = 100  # general generation scaling for all generation units in simulation (does not apply to setup)
whole_year = True  # if True malfunction is present from start of simulation on; if False malfunction is at a random point
t_start = None  # default (None): times inferred from profiles in data
t_end = None
# t_start = pd.Timestamp('2017-01-01 00:00:00', tz='utc')  # example for custom sim time
# t_end = pd.Timestamp('2018-01-01 00:00:00', tz='utc') - pd.Timedelta(step_size + 'T')
[ [ [ 7, 9 ], [ 107, 109 ], [ 120, 122 ], [ 183, 185 ], [ 196, 198 ], [ 239, 241 ], [ 252, 254 ], [ 296, 298 ], [ 309, 311 ], [ 346, 348 ], [ 359, 361 ], [ 2918, 2920 ], [ 2950, 2952 ], [ 2964, 2966 ] ], [ [ 17, 21 ], [ 4048, 4052 ], [ 4266, 4270 ], [ 4636, 4640 ] ], [ [ 71, 79 ] ], [ [ 88, 104 ], [ 2929, 2945 ], [ 2977, 2993 ] ], [ [ 165, 180 ] ], [ [ 221, 236 ] ], [ [ 277, 293 ] ], [ [ 330, 343 ] ], [ [ 382, 398 ] ], [ [ 487, 502 ] ], [ [ 1894, 1911 ], [ 3814, 3831 ], [ 4196, 4213 ], [ 4404, 4421 ] ], [ [ 1995, 2012 ] ], [ [ 2093, 2111 ] ], [ [ 2250, 2258 ] ], [ [ 2399, 2408 ] ], [ [ 2417, 2425 ] ], [ [ 2539, 2552 ], [ 3702, 3715 ], [ 4175, 4188 ], [ 4378, 4391 ], [ 4752, 4765 ] ], [ [ 2610, 2639 ] ], [ [ 2653, 2677 ] ], [ [ 2691, 2708 ], [ 4059, 4076 ], [ 4277, 4294 ], [ 4647, 4664 ] ], [ [ 2717, 2742 ], [ 4079, 4104 ], [ 4297, 4322 ], [ 4667, 4692 ] ], [ [ 2884, 2899 ], [ 4139, 4154 ], [ 4327, 4342 ], [ 4716, 4731 ] ], [ [ 3001, 3014 ] ], [ [ 3086, 3090 ] ], [ [ 3104, 3119 ] ], [ [ 3245, 3263 ] ], [ [ 3271, 3276 ] ], [ [ 3384, 3407 ] ], [ [ 3472, 3485 ] ], [ [ 3572, 3582 ], [ 3689, 3699 ], [ 4159, 4169 ], [ 4360, 4370 ], [ 4736, 4746 ] ], [ [ 3850, 3877 ], [ 4109, 4136 ] ], [ [ 4038, 4045 ] ], [ [ 4256, 4263 ] ], [ [ 4438, 4454 ], [ 4697, 4713 ] ], [ [ 4626, 4633 ] ], [ [ 4778, 4785 ] ], [ [ 4909, 4918 ] ], [ [ 4982, 4992 ] ], [ [ 5079, 5099 ] ], [ [ 5213, 5240 ] ], [ [ 5383, 5407 ] ], [ [ 5494, 5506 ] ], [ [ 5610, 5628 ] ], [ [ 5743, 5753 ] ], [ [ 5884, 5891 ] ], [ [ 5977, 5982 ] ] ]
#!/usr/bin/env python3 """ Check lesson files and their contents. """ import os import glob import re from argparse import ArgumentParser from util import (Reporter, read_markdown, load_yaml, check_unwanted_files, require) __version__ = '0.3' # Where to look for source Markdown files. SOURCE_DIRS = ['', '_episodes', '_extras'] # Where to look for source Rmd files. SOURCE_RMD_DIRS = ['_episodes_rmd'] # Required files: each entry is ('path': YAML_required). # FIXME: We do not yet validate whether any files have the required # YAML headers, but should in the future. # The '%' is replaced with the source directory path for checking. # Episodes are handled specially, and extra files in '_extras' are also handled # specially. This list must include all the Markdown files listed in the # 'bin/initialize' script. REQUIRED_FILES = { '%/CODE_OF_CONDUCT.md': True, '%/CONTRIBUTING.md': False, '%/LICENSE.md': True, '%/README.md': False, '%/_extras/discuss.md': True, '%/_extras/guide.md': True, '%/index.md': True, '%/reference.md': True, '%/setup.md': True, } # Episode filename pattern. P_EPISODE_FILENAME = re.compile(r'/_episodes/(\d\d)-[-\w]+.md$') # Pattern to match lines ending with whitespace. P_TRAILING_WHITESPACE = re.compile(r'\s+$') # Pattern to match figure references in HTML. P_FIGURE_REFS = re.compile(r'<img[^>]+src="([^"]+)"[^>]*>') # Pattern to match internally-defined Markdown links. P_INTERNAL_LINK_REF = re.compile(r'\[([^\]]+)\]\[([^\]]+)\]') # Pattern to match reference links (to resolve internally-defined references). P_INTERNAL_LINK_DEF = re.compile(r'^\[([^\]]+)\]:\s*(.+)') # Pattern to match {% include ... %} statements P_INTERNAL_INCLUDE_LINK = re.compile(r'^{% include ([^ ]*) %}$') # What kinds of blockquotes are allowed? KNOWN_BLOCKQUOTES = { 'callout', 'challenge', 'checklist', 'discussion', 'keypoints', 'objectives', 'prereq', 'quotation', 'solution', 'testimonial' } # What kinds of code fragments are allowed? KNOWN_CODEBLOCKS = { 'error', 'output', 'source', 'language-bash', 'html', 'language-make', 'language-matlab', 'language-python', 'language-r', 'language-shell', 'language-sql' } # What fields are required in teaching episode metadata? TEACHING_METADATA_FIELDS = { ('title', str), ('teaching', int), ('exercises', int), ('questions', list), ('objectives', list), ('keypoints', list) } # What fields are required in break episode metadata? BREAK_METADATA_FIELDS = { ('layout', str), ('title', str), ('break', int) } # How long are lines allowed to be? # Please keep this in sync with .editorconfig! 
MAX_LINE_LEN = 100 def main(): """Main driver.""" args = parse_args() args.reporter = Reporter() check_config(args.reporter, args.source_dir) check_source_rmd(args.reporter, args.source_dir, args.parser) args.references = read_references(args.reporter, args.reference_path) docs = read_all_markdown(args.source_dir, args.parser) check_fileset(args.source_dir, args.reporter, list(docs.keys())) check_unwanted_files(args.source_dir, args.reporter) for filename in list(docs.keys()): checker = create_checker(args, filename, docs[filename]) checker.check() args.reporter.report() if args.reporter.messages and not args.permissive: exit(1) def parse_args(): """Parse command-line arguments.""" parser = ArgumentParser(description="""Check episode files in a lesson.""") parser.add_argument('-l', '--linelen', default=False, action="store_true", dest='line_lengths', help='Check line lengths') parser.add_argument('-p', '--parser', default=None, dest='parser', help='path to Markdown parser') parser.add_argument('-r', '--references', default=None, dest='reference_path', help='path to Markdown file of external references') parser.add_argument('-s', '--source', default=os.curdir, dest='source_dir', help='source directory') parser.add_argument('-w', '--whitespace', default=False, action="store_true", dest='trailing_whitespace', help='Check for trailing whitespace') parser.add_argument('--permissive', default=False, action="store_true", dest='permissive', help='Do not raise an error even if issues are detected') args, extras = parser.parse_known_args() require(args.parser is not None, 'Path to Markdown parser not provided') require(not extras, 'Unexpected trailing command-line arguments "{0}"'.format(extras)) return args def check_config(reporter, source_dir): """Check configuration file.""" config_file = os.path.join(source_dir, '_config.yml') config = load_yaml(config_file) reporter.check_field(config_file, 'configuration', config, 'kind', 'lesson') reporter.check_field(config_file, 'configuration', config, 'carpentry', ('swc', 'dc', 'lc', 'cp')) reporter.check_field(config_file, 'configuration', config, 'title') reporter.check_field(config_file, 'configuration', config, 'email') for defaults in [ {'values': {'root': '.', 'layout': 'page'}}, {'values': {'root': '..', 'layout': 'episode'}, 'scope': {'type': 'episodes', 'path': ''}}, {'values': {'root': '..', 'layout': 'page'}, 'scope': {'type': 'extras', 'path': ''}} ]: reporter.check(defaults in config.get('defaults', []), 'configuration', '"root" not set to "." 
in configuration') def check_source_rmd(reporter, source_dir, parser): """Check that Rmd episode files include `source: Rmd`""" episode_rmd_dir = [os.path.join(source_dir, d) for d in SOURCE_RMD_DIRS] episode_rmd_files = [os.path.join(d, '*.Rmd') for d in episode_rmd_dir] results = {} for pat in episode_rmd_files: for f in glob.glob(pat): data = read_markdown(parser, f) dy = data['metadata'] if dy: reporter.check_field(f, 'episode_rmd', dy, 'source', 'Rmd') def read_references(reporter, ref_path): """Read shared file of reference links, returning dictionary of valid references {symbolic_name : URL} """ if not ref_path: raise Warning("No filename has been provided.") result = {} urls_seen = set() with open(ref_path, 'r') as reader: for (num, line) in enumerate(reader, 1): if P_INTERNAL_INCLUDE_LINK.search(line): continue m = P_INTERNAL_LINK_DEF.search(line) message = '{}: {} not a valid reference: {}' require(m, message.format(ref_path, num, line.rstrip())) name = m.group(1) url = m.group(2) message = 'Empty reference at {0}:{1}' require(name, message.format(ref_path, num)) unique_name = name not in result unique_url = url not in urls_seen reporter.check(unique_name, ref_path, 'Duplicate reference name {0} at line {1}', name, num) reporter.check(unique_url, ref_path, 'Duplicate definition of URL {0} at line {1}', url, num) result[name] = url urls_seen.add(url) return result def read_all_markdown(source_dir, parser): """Read source files, returning {path : {'metadata':yaml, 'metadata_len':N, 'text':text, 'lines':[(i, line, len)], 'doc':doc}} """ all_dirs = [os.path.join(source_dir, d) for d in SOURCE_DIRS] all_patterns = [os.path.join(d, '*.md') for d in all_dirs] result = {} for pat in all_patterns: for filename in glob.glob(pat): data = read_markdown(parser, filename) if data: result[filename] = data return result def check_fileset(source_dir, reporter, filenames_present): """Are all required files present? Are extraneous files present?""" # Check files with predictable names. required = [p.replace('%', source_dir) for p in REQUIRED_FILES] missing = set(required) - set(filenames_present) for m in missing: reporter.add(None, 'Missing required file {0}', m) # Check episode files' names. seen = [] for filename in filenames_present: if '_episodes' not in filename: continue m = P_EPISODE_FILENAME.search(filename) if m and m.group(1): seen.append(m.group(1)) else: reporter.add( None, 'Episode {0} has badly-formatted filename', filename) # Check for duplicate episode numbers. reporter.check(len(seen) == len(set(seen)), None, 'Duplicate episode numbers {0} vs {1}', sorted(seen), sorted(set(seen))) # Check that numbers are consecutive. 
seen = sorted([int(s) for s in seen]) clean = True for i in range(len(seen) - 1): clean = clean and ((seen[i+1] - seen[i]) == 1) reporter.check(clean, None, 'Missing or non-consecutive episode numbers {0}', seen) def create_checker(args, filename, info): """Create appropriate checker for file.""" for (pat, cls) in CHECKERS: if pat.search(filename): return cls(args, filename, **info) return NotImplemented class CheckBase: """Base class for checking Markdown files.""" def __init__(self, args, filename, metadata, metadata_len, text, lines, doc): """Cache arguments for checking.""" self.args = args self.reporter = self.args.reporter # for convenience self.filename = filename self.metadata = metadata self.metadata_len = metadata_len self.text = text self.lines = lines self.doc = doc self.layout = None def check(self): """Run tests.""" self.check_metadata() self.check_line_lengths() self.check_trailing_whitespace() self.check_blockquote_classes() self.check_codeblock_classes() self.check_defined_link_references() def check_metadata(self): """Check the YAML metadata.""" self.reporter.check(self.metadata is not None, self.filename, 'Missing metadata entirely') if self.metadata and (self.layout is not None): self.reporter.check_field( self.filename, 'metadata', self.metadata, 'layout', self.layout) def check_line_lengths(self): """Check the raw text of the lesson body.""" if self.args.line_lengths: over = [i for (i, l, n) in self.lines if ( n > MAX_LINE_LEN) and (not l.startswith('!'))] self.reporter.check(not over, self.filename, 'Line(s) too long: {0}', ', '.join([str(i) for i in over])) def check_trailing_whitespace(self): """Check for whitespace at the ends of lines.""" if self.args.trailing_whitespace: trailing = [ i for (i, l, n) in self.lines if P_TRAILING_WHITESPACE.match(l)] self.reporter.check(not trailing, self.filename, 'Line(s) end with whitespace: {0}', ', '.join([str(i) for i in trailing])) def check_blockquote_classes(self): """Check that all blockquotes have known classes.""" for node in self.find_all(self.doc, {'type': 'blockquote'}): cls = self.get_val(node, 'attr', 'class') self.reporter.check(cls in KNOWN_BLOCKQUOTES, (self.filename, self.get_loc(node)), 'Unknown or missing blockquote type {0}', cls) def check_codeblock_classes(self): """Check that all code blocks have known classes.""" for node in self.find_all(self.doc, {'type': 'codeblock'}): cls = self.get_val(node, 'attr', 'class') self.reporter.check(cls in KNOWN_CODEBLOCKS, (self.filename, self.get_loc(node)), 'Unknown or missing code block type {0}', cls) def check_defined_link_references(self): """Check that defined links resolve in the file. Internally-defined links match the pattern [text][label]. 
""" result = set() for node in self.find_all(self.doc, {'type': 'text'}): for match in P_INTERNAL_LINK_REF.findall(node['value']): text = match[0] link = match[1] if link not in self.args.references: result.add('"{0}"=>"{1}"'.format(text, link)) self.reporter.check(not result, self.filename, 'Internally-defined links may be missing definitions: {0}', ', '.join(sorted(result))) def find_all(self, node, pattern, accum=None): """Find all matches for a pattern.""" assert isinstance(pattern, dict), 'Patterns must be dictionaries' if accum is None: accum = [] if self.match(node, pattern): accum.append(node) for child in node.get('children', []): self.find_all(child, pattern, accum) return accum def match(self, node, pattern): """Does this node match the given pattern?""" for key in pattern: if key not in node: return False val = pattern[key] if isinstance(val, str): if node[key] != val: return False elif isinstance(val, dict): if not self.match(node[key], val): return False return True @staticmethod def get_val(node, *chain): """Get value one or more levels down.""" curr = node for selector in chain: curr = curr.get(selector, None) if curr is None: break return curr def get_loc(self, node): """Convenience method to get node's line number.""" result = self.get_val(node, 'options', 'location') if self.metadata_len is not None: result += self.metadata_len return result class CheckNonJekyll(CheckBase): """Check a file that isn't translated by Jekyll.""" def check_metadata(self): self.reporter.check(self.metadata is None, self.filename, 'Unexpected metadata') class CheckIndex(CheckBase): """Check the main index page.""" def __init__(self, args, filename, metadata, metadata_len, text, lines, doc): super().__init__(args, filename, metadata, metadata_len, text, lines, doc) self.layout = 'lesson' def check_metadata(self): super().check_metadata() self.reporter.check(self.metadata.get('root', '') == '.', self.filename, 'Root not set to "."') class CheckEpisode(CheckBase): """Check an episode page.""" def check(self): """Run extra tests.""" super().check() self.check_reference_inclusion() def check_metadata(self): super().check_metadata() if self.metadata: if 'layout' in self.metadata: if self.metadata['layout'] == 'break': self.check_metadata_fields(BREAK_METADATA_FIELDS) else: self.reporter.add(self.filename, 'Unknown episode layout "{0}"', self.metadata['layout']) else: self.check_metadata_fields(TEACHING_METADATA_FIELDS) def check_metadata_fields(self, expected): """Check metadata fields.""" for (name, type_) in expected: if name not in self.metadata: self.reporter.add(self.filename, 'Missing metadata field {0}', name) elif not isinstance(self.metadata[name], type_): self.reporter.add(self.filename, '"{0}" has wrong type in metadata ({1} instead of {2})', name, type(self.metadata[name]), type_) def check_reference_inclusion(self): """Check that links file has been included.""" if not self.args.reference_path: return for (i, last_line, line_len) in reversed(self.lines): if last_line: break require(last_line, 'No non-empty lines in {0}'.format(self.filename)) include_filename = os.path.split(self.args.reference_path)[-1] if include_filename not in last_line: self.reporter.add(self.filename, 'episode does not include "{0}"', include_filename) class CheckReference(CheckBase): """Check the reference page.""" def __init__(self, args, filename, metadata, metadata_len, text, lines, doc): super().__init__(args, filename, metadata, metadata_len, text, lines, doc) self.layout = 'reference' class 
CheckGeneric(CheckBase): """Check a generic page.""" def __init__(self, args, filename, metadata, metadata_len, text, lines, doc): super().__init__(args, filename, metadata, metadata_len, text, lines, doc) CHECKERS = [ (re.compile(r'CONTRIBUTING\.md'), CheckNonJekyll), (re.compile(r'README\.md'), CheckNonJekyll), (re.compile(r'index\.md'), CheckIndex), (re.compile(r'reference\.md'), CheckReference), (re.compile(r'_episodes/.*\.md'), CheckEpisode), (re.compile(r'.*\.md'), CheckGeneric) ] if __name__ == '__main__': main()
[ [ [ 80, 82 ], [ 4285, 4287 ], [ 5233, 5235 ], [ 6282, 6284 ], [ 6361, 6363 ], [ 8237, 8239 ], [ 8307, 8309 ], [ 17767, 17769 ] ], [ [ 90, 94 ], [ 6480, 6484 ], [ 8419, 8423 ] ], [ [ 102, 104 ], [ 1176, 1178 ], [ 1294, 1296 ], [ 1377, 1379 ], [ 1498, 1500 ], [ 1640, 1642 ], [ 1752, 1754 ], [ 18536, 18538 ], [ 18591, 18593 ], [ 18640, 18642 ], [ 18684, 18686 ], [ 18736, 18738 ], [ 18789, 18791 ] ], [ [ 126, 140 ], [ 3538, 3552 ] ], [ [ 160, 168 ], [ 2851, 2859 ] ], [ [ 170, 183 ], [ 6515, 6528 ], [ 8454, 8467 ] ], [ [ 185, 194 ], [ 5286, 5295 ] ], [ [ 196, 216 ], [ 3184, 3204 ] ], [ [ 236, 243 ], [ 4931, 4938 ], [ 5020, 5027 ], [ 7257, 7264 ], [ 7438, 7445 ], [ 17653, 17660 ] ], [ [ 246, 257 ] ], [ [ 310, 321 ], [ 8274, 8285 ] ], [ [ 392, 407 ], [ 6319, 6334 ] ], [ [ 845, 859 ], [ 8794, 8808 ] ], [ [ 1155, 1173 ], [ 9105, 9123 ] ], [ [ 1270, 1291 ], [ 11961, 11982 ] ], [ [ 1361, 1374 ] ], [ [ 1476, 1495 ], [ 13444, 13463 ] ], [ [ 1618, 1637 ], [ 7154, 7173 ] ], [ [ 1726, 1749 ], [ 7090, 7113 ] ], [ [ 1833, 1850 ], [ 12490, 12507 ] ], [ [ 2069, 2085 ], [ 12952, 12968 ] ], [ [ 2350, 2374 ], [ 16755, 16779 ] ], [ [ 2578, 2599 ], [ 16463, 16484 ] ], [ [ 2750, 2762 ], [ 11489, 11501 ] ], [ [ 2775, 2779 ], [ 18861, 18865 ] ], [ [ 3470, 3480 ], [ 2818, 2828 ] ], [ [ 5142, 5154 ], [ 2866, 2878 ] ], [ [ 6149, 6165 ], [ 2915, 2931 ] ], [ [ 6711, 6726 ], [ 2999, 3014 ] ], [ [ 8038, 8055 ], [ 3063, 3080 ] ], [ [ 8571, 8584 ], [ 3115, 3128 ] ], [ [ 9893, 9907 ], [ 3294, 3308 ] ], [ [ 10124, 10133 ], [ 15311, 15320 ], [ 15574, 15583 ], [ 16065, 16074 ], [ 18037, 18046 ], [ 18306, 18315 ] ], [ [ 15296, 15310 ], [ 18569, 18583 ], [ 18618, 18632 ] ], [ [ 15563, 15573 ], [ 18666, 18676 ] ], [ [ 16052, 16064 ], [ 18769, 18781 ] ], [ [ 18022, 18036 ], [ 18714, 18728 ] ], [ [ 18293, 18305 ], [ 18812, 18824 ] ], [ [ 18518, 18526 ], [ 10001, 10009 ] ] ]
# Imports import torch from labml_nn.transformers.switch import SwitchTransformer, SwitchTransformerLayer, SwitchFeedForward from labml_nn.transformers import MultiHeadAttention from labml_nn.transformers.feed_forward import FeedForward import numpy as np from transformers import AutoConfig, AutoModel import torch.nn as nn import math from torch.utils.data import Dataset, DataLoader from sklearn.metrics.pairwise import cosine_similarity from sklearn.metrics import mean_squared_error from random import choice from sklearn.decomposition import PCA from copy import deepcopy from transformers import BertModel, BertConfig # Custom dataset function to store Open Subtitles data class CustomDataset(torch.utils.data.Dataset): 'Characterizes a dataset for PyTorch' def __init__(self, input_ids, token_type_ids, attention_masks): 'Initialization' self.input_ids = input_ids self.token_type_ids = token_type_ids self.attention_masks = attention_masks def __len__(self): 'Denotes the total number of samples' return len(self.input_ids) def __getitem__(self, index): 'Generates one sample of data' input_id = self.input_ids[index] token_type_ID = self.token_type_ids[index] attention_mask = self.attention_masks[index] sample = {'input_ids':input_id, 'token_type_ids':token_type_ID , 'attention_mask':attention_mask} return sample # Weights init and switch init initialise the weights for the model as desribed in Switch Transformer paper def weights_init(tensor: torch.Tensor): if isinstance(tensor, nn.Linear): switch_init(tensor.weight.data) torch.nn.init.zeros_(tensor.bias.data) if isinstance(tensor, nn.LayerNorm): torch.nn.init.zeros_(tensor.weight.data) torch.nn.init.zeros_(tensor.bias.data) def switch_init(tensor: torch.Tensor, s: float = 0.1, mean: float=0) -> torch.Tensor: fan_in, fan_out = torch.nn.init._calculate_fan_in_and_fan_out(tensor) std = math.sqrt(s/fan_in) return torch.nn.init.trunc_normal_(tensor=tensor, mean=mean, std=std) class LaBSE_Switch(nn.Module): """ Torch module for to create a Switch Transformer for LaBSE. Can be used for other BERT based models too, just change the input_id tokenization and word_embedding module. Inputs: config = dictionary of configuration word_embeddings_module = torch module mapping token ids to word embeddings Forward: Input_ids = ids using labse tokenizer attention_mask = binary, indicates to model which tokens should be attended to, and which should not. Outputs: outputs = a dictionary containing x, counts, route_prob, n_dropped, logits, attention, values See Switch Transformer paper to understand all except: attention, values and logits, which are used during knowledge distillation. 
""" def __init__(self, config, word_embeddings_module): super().__init__() # set the switch transformer as the actual neural net self.switch_model = SwitchTransformer( SwitchTransformerLayer( d_model=config['d_model'], attn=MultiHeadAttention(config['heads'], config['d_model'], config['dropout']), feed_forward=SwitchFeedForward( capacity_factor=config['capacity_factor'], drop_tokens=config['drop_tokens'], is_scale_prob=config['is_scale_prob'], n_experts=config['n_experts'], expert=FeedForward(config['d_model'], config['d_ff'], config['dropout_ffn']), d_model=config['d_model']), dropout_prob=config['dropout']), config['n_layers'], d_out = int(768), dropout_prob = config['dropout']) # initialise weights # self.switch_model.apply(weights_init) # module that maps input tokens into embedding vectors self.word_embeddings = word_embeddings_module # get attention weights from teacher # self.weight_init_from_teacher(teacher_model=teacher_model, int_matches=int_matches) def weight_init_from_teacher(self, teacher_model, int_matches): """ Initialises attention modules of student with those of the teacher for the --- specific to LaBSE and DistilSwitch int_matches should be a list of tuples of [(teacher_layer, student_layer),...] e.g. int_matches = [(5,0),(11,2)] --> give attention weights of teacher layer 5 to student layer 0 """ # teacher_model=load_teacher(device=torch.device('cuda')) self.switch_model.layers[int_matches[1]].attn.query.linear.weight = teacher_model.encoder.layer[int_matches[0]].attention.self.query.weight self.switch_model.layers[int_matches[1]].attn.query.linear.bias = teacher_model.encoder.layer[int_matches[0]].attention.self.query.bias self.switch_model.layers[int_matches[1]].attn.key.linear.weight = teacher_model.encoder.layer[int_matches[0]].attention.self.key.weight self.switch_model.layers[int_matches[1]].attn.key.linear.bias = teacher_model.encoder.layer[int_matches[0]].attention.self.key.bias self.switch_model.layers[int_matches[1]].attn.value.linear.weight = teacher_model.encoder.layer[int_matches[0]].attention.self.value.weight self.switch_model.layers[int_matches[1]].attn.value.linear.bias = teacher_model.encoder.layer[int_matches[0]].attention.self.value.bias self.switch_model.layers[int_matches[1]].attn.output.weight = teacher_model.encoder.layer[int_matches[0]].attention.output.dense.weight self.switch_model.layers[int_matches[1]].attn.output.bias = teacher_model.encoder.layer[int_matches[0]].attention.output.dense.bias # self.switch_model.layers[int_matches[1]].norm_ff.weight = teacher_model.encoder.layer[int_matches[0]].output.LayerNorm.weight # self.switch_model.layers[int_matches[1]].norm_ff.bias = teacher_model.encoder.layer[int_matches[0]].output.LayerNorm.bias def forward(self, input_ids, token_type_ids=None, attention_mask=None): # masks and token type ids not used, as we're just creating sentence embeddings for classification tasks # word embeddings of shape [batch, seq_len, d_model] input_embeddings = self.word_embeddings(input_ids) # model input on shape [seq_len, batch, d_model] and mask _batch,_seq_len,_n_hid = input_embeddings.shape #print(_n_hid) # call switch transformer outputs = self.switch_model(torch.reshape(input_embeddings, (_seq_len, _batch, _n_hid)), attention_mask=None) return outputs # function to blackbox load the student for distillation - can be switch or bert based def load_student(name, student_config, device, teacher_model, int_matches, N_LAYERS): if name!='switch': # for pretrained bert models - setup config student_config = 
BertConfig.from_pretrained(name) student_config.num_hidden_layers = N_LAYERS student_config.output_hidden_states = True student_config.output_attentions = True student_config.use_cache = True student_config.is_decoder = True # load model and set input embeddings student_model = BertModel.from_pretrained(name, config=student_config) student_model.set_input_embeddings(teacher_model.get_input_embeddings()) student_model = student_model.float() student_model.to(device=device) return student_model if name=='switch': # create compressed word embeddings from those of the teacher word_embeddings = deepcopy(teacher_model.get_input_embeddings()) compressed_word_embeddings = word_embedding_compression(word_embeddings, student_config['d_model']) # create student model student_model = LaBSE_Switch(config=student_config, word_embeddings_module=compressed_word_embeddings) # initialise weights student_model.switch_model.apply(weights_init) student_model.weight_init_from_teacher(teacher_model=teacher_model, int_matches=int_matches) # convert model to float32 and move to device student_model = student_model.float() student_model.to(device=device) return student_model # loads teacher model from Huggingface def load_teacher(device): teacher_config = AutoConfig.from_pretrained('sentence-transformers/LaBSE') teacher_config.output_hidden_states = True teacher_config.output_attentions = True teacher_config.use_cache = True teacher_config.is_decoder = True teacher_model = AutoModel.from_pretrained('sentence-transformers/LaBSE', config=teacher_config) teacher_model.float() # needs to be 32 bit precision to get decent results from distillation teacher_model.to(device=device) return teacher_model # Adaptor for BERT based models def simple_adaptor(batch, model_outputs): # values need to be reformatted from Huggingface 'past_key_values' output values = [] for i in model_outputs['past_key_values']: values.append(i[1]) values = torch.stack(values) attentions = [] for j in model_outputs['attentions']: attentions.append(inv_softmax(j)) attentions = torch.stack(attentions) # we use pooler output as logits return {'logits': model_outputs['pooler_output'], 'hidden': model_outputs['hidden_states'], #'attention': model_outputs['attentions'], 'attention':attentions, 'inputs_mask': batch['attention_mask'], 'value_relation': values, 'pooler_output':model_outputs['pooler_output']} def inv_softmax(x,C=-50): # reverses softmax operation - used in teacher_adaptor # C variable sets the min value of the scores, -50 works well. 
result = torch.log(x) result = torch.where(result <= float('-inf'), torch.full_like(result,C), result) return result def teacher_adaptor(batch, model_outputs): # selects relevant model and batch outputs used for distillation loss calculation values = [] for i in model_outputs['past_key_values']: values.append(i[1]) values = torch.stack(values) attentions = [] for j in model_outputs['attentions']: attentions.append(inv_softmax(j)) attentions = torch.stack(attentions) # print(model_outputs['pooler_output'].requires_grad) return {#'logits': model_outputs['last_hidden_state'], 'logits':model_outputs['pooler_output'], 'hidden': model_outputs['hidden_states'], #'attention': model_outputs['attentions'], 'attention': attentions, 'inputs_mask': batch['attention_mask'], 'value_relation': values, 'pooler_output':model_outputs['pooler_output']} # adaptor for switch model def switch_student_adaptor(batch, model_outputs): # selects relevant model and batch outputs and reformats them # needs to have same shapes as teacher adaptor # reformat attention layers, len, len, batch_size, heads = model_outputs['attention'].shape attention = model_outputs['attention'].reshape(layers, batch_size, heads, len, len) # reformat logits len, batch_size, d_model = model_outputs['logits'].shape logits = model_outputs['logits'].reshape(batch_size, len, d_model) # print(model_outputs['pooler_output'].requires_grad) # reformat values layers, len, batch_size, heads, embedding_per_head = model_outputs['values'].shape values = model_outputs['values'].reshape(layers, batch_size, heads, len, embedding_per_head) return {#'logits': logits, 'logits':model_outputs['pooler_output'], 'counts': model_outputs['counts'], 'attention': attention, 'inputs_mask': batch['attention_mask'], 'route_prob': model_outputs['route_prob'], 'n_dropped': model_outputs['n_dropped'], 'value_relation': values} # Predict function evaluates model every epoch to show training progress def predict(model, teacher_model, eval_dataset, step, device, STUDENT, BATCH_SIZE, eval_metric='cosine_similarity', feedback=True): ''' model = student_model teacher_model = labse eval_dataset = num of dev set samples to test model on per callback device = cuda or cpu student = switch or !switch eval_metric = metric to evaluate the model - mse or cosine_similarity ''' model.eval() student_logits = [] teacher_logits =[] batch_counts = [] batch_n_dropped = [] batch_route_prob = [] dataloader = DataLoader(eval_dataset,batch_size=BATCH_SIZE) print('Running callback function on {} dev set samples...'.format(len(eval_dataset))) for batch in dataloader: input_ids = batch['input_ids'].to(device) attention_mask = batch['attention_mask'].to(device) with torch.no_grad(): model_outputs = model(input_ids=input_ids, attention_mask=attention_mask) logits_S = model_outputs['pooler_output'] logits_T = teacher_model(input_ids=input_ids, attention_mask=attention_mask)['pooler_output'] cpu_logits_S = logits_S.detach().cpu() cpu_logits_T = logits_T.detach().cpu() if STUDENT=='switch' and feedback==True: counts = model_outputs['counts'].detach().cpu() n_dropped = model_outputs['n_dropped'] route_prob = model_outputs['route_prob'].detach().cpu() for i in range(len(cpu_logits_S)): student_logits.append(cpu_logits_S[i].numpy()) teacher_logits.append(cpu_logits_T[i].numpy()) if STUDENT=='switch' and feedback==True: for i in range(len(counts)): batch_counts.append(counts[i].numpy()) batch_n_dropped.append(n_dropped[i]) batch_route_prob.append(route_prob[i].numpy()) model.train() student_logits = 
np.array(student_logits) teacher_logits = np.array(teacher_logits) if eval_metric=='cosine_similarity': similarities = np.diag(cosine_similarity(student_logits, teacher_logits)) print ("Average cosine similarity for these samples: ", np.mean(similarities)) if eval_metric=='mse': mse_error = mean_squared_error(student_logits, teacher_logits) print ("Average mean squared error for these samples: ", mse_error) if STUDENT=='switch' and feedback==True: switch_counts = np.array(batch_counts) switch_n_dropped = np.array(batch_n_dropped) switch_route_prob = np.array(batch_route_prob) print('SWITCH BEHAVIOUR:') print('Counts Shape: \n', switch_counts.shape) print('Counts: \n', switch_counts) print('N_dropped: \n', switch_n_dropped) print('Route Prob: \n', switch_route_prob) return torch.Tensor([np.mean(similarities)]) # generates random parameters for hyperparam tuning def generate_random_params(params): # input: params dictionary containing lists of possible values chosen_params = {} for param in params: chosen_params[param] = choice(params[param]) return chosen_params def word_embedding_compression(word_embedding_module, d_model): """ Compresses a given word_embedding_module (type torch.Embedding) into a module of d_model dimensionality. """ word_embedding_matrix = word_embedding_module.weight assert word_embedding_matrix.shape[1]>=d_model, 'The desired word embedding dimensionality is greater than the teacher word embeddings. That is not compression! Make d_model smaller.' # return the module if it's the same dimensionality if word_embedding_matrix.shape[1]==d_model: return word_embedding_module # else compress pca = PCA(n_components = d_model) compressed_word_embedding_matrix = pca.fit_transform(word_embedding_matrix.detach().cpu().numpy()) compressed_word_embedding_matrix = torch.from_numpy(compressed_word_embedding_matrix) word_embedding_module.weight = torch.nn.parameter.Parameter(compressed_word_embedding_matrix) return word_embedding_module
[ [ [ 17, 22 ], [ 702, 707 ], [ 1571, 1576 ], [ 1672, 1677 ], [ 1760, 1765 ], [ 1809, 1814 ], [ 1921, 1926 ], [ 1873, 1878 ], [ 1957, 1962 ], [ 2051, 2056 ], [ 6780, 6785 ], [ 9459, 9464 ], [ 9605, 9610 ], [ 10186, 10191 ], [ 10212, 10217 ], [ 10249, 10254 ], [ 10536, 10541 ], [ 10682, 10687 ], [ 13274, 13279 ], [ 15312, 15317 ], [ 16413, 16418 ], [ 16499, 16504 ] ], [ [ 64, 81 ], [ 3077, 3094 ] ], [ [ 83, 105 ], [ 3119, 3141 ] ], [ [ 107, 124 ], [ 3298, 3315 ] ], [ [ 159, 177 ], [ 3195, 3213 ] ], [ [ 225, 236 ], [ 3606, 3617 ] ], [ [ 244, 255 ], [ 14388, 14390 ], [ 14434, 14436 ], [ 14533, 14535 ], [ 14656, 14658 ], [ 14936, 14938 ], [ 14986, 14988 ], [ 15040, 15042 ], [ 15326, 15328 ] ], [ [ 281, 291 ], [ 8712, 8722 ] ], [ [ 293, 302 ], [ 8954, 8963 ] ], [ [ 310, 324 ], [ 2135, 2137 ], [ 1612, 1614 ], [ 1737, 1739 ] ], [ [ 332, 336 ], [ 2019, 2023 ] ], [ [ 366, 373 ] ], [ [ 375, 385 ], [ 12984, 12994 ] ], [ [ 423, 440 ], [ 14541, 14558 ] ], [ [ 469, 487 ], [ 14731, 14749 ] ], [ [ 507, 513 ], [ 15585, 15591 ] ], [ [ 548, 551 ], [ 16243, 16246 ] ], [ [ 569, 577 ], [ 7941, 7949 ] ], [ [ 603, 612 ], [ 7552, 7561 ] ], [ [ 614, 624 ], [ 7208, 7218 ] ], [ [ 688, 701 ] ], [ [ 1550, 1562 ], [ 8326, 8338 ] ], [ [ 1853, 1864 ], [ 1632, 1643 ] ], [ [ 2122, 2134 ], [ 8160, 8172 ] ], [ [ 7016, 7028 ] ], [ [ 8669, 8681 ] ], [ [ 9234, 9248 ] ], [ [ 10025, 10036 ], [ 9572, 9583 ], [ 10649, 10660 ] ], [ [ 10307, 10322 ] ], [ [ 11210, 11232 ] ], [ [ 12426, 12433 ] ], [ [ 15407, 15429 ] ], [ [ 15637, 15663 ], [ 8025, 8051 ] ] ]
import cv2
import argparse

parser = argparse.ArgumentParser()
parser.add_argument('-p2', "--path", help="path to input video")
parser.add_argument('-p1', "--spath", help="path where the images should be stored")
parser.add_argument('-n', "--num", help="number from which image label naming starts", type=int)
args = parser.parse_args()

num = args.num
cap = cv2.VideoCapture(args.path)
count = 0
path = args.spath
print(args.num, args.path, args.spath)

ret = True
while ret:
    ret, frame = cap.read()
    count += 1
    if count % 10 == 0:
        cv2.imwrite(path+str(num)+'.jpg', frame)
        print(path+str(num)+'.jpg')
        num += 1
[ [ [ 7, 10 ], [ 357, 360 ], [ 547, 550 ] ], [ [ 18, 26 ], [ 36, 44 ] ], [ [ 27, 33 ], [ 62, 68 ], [ 127, 133 ], [ 212, 218 ], [ 316, 322 ] ], [ [ 309, 313 ], [ 342, 346 ], [ 374, 378 ], [ 402, 406 ], [ 419, 423 ], [ 429, 433 ], [ 440, 444 ] ], [ [ 336, 339 ], [ 568, 571 ], [ 611, 614 ], [ 632, 635 ] ], [ [ 351, 354 ], [ 489, 492 ] ], [ [ 385, 390 ], [ 504, 509 ] ], [ [ 395, 399 ], [ 559, 563 ], [ 602, 606 ] ], [ [ 452, 455 ], [ 469, 472 ] ], [ [ 478, 481 ], [ 469, 472 ] ], [ [ 483, 488 ], [ 581, 586 ] ] ]
# Copyright 2019 Quantapix Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================


class Justifier:

    def __init__(self, **kw):
        super().__init__(**kw)
        self.justs = [0] * 9
        self.offsets = [(0, 0, 0, 1, 1, 1, 1, 1, 1),
                        (0, -1, -2, 0, 0, 0, 1, 1, 1),
                        (0, -1, -2, 0, -1, -2, 0, 0, 0)]

    def init_justs(self, justs):
        for i in justs:
            i = i // 3
            os = self.offsets[i]
            if os:
                self.justs = [sum(x) for x in zip(self.justs, os)]
                self.offsets[i] = None

    def calc_just(self, justs):
        for i in justs:
            i = self.justs[i] + (i % 3)
            if i == 1:
                return 'justify-content-center'
            elif i > 1:
                return 'justify-content-end'
        return 'justify-content-start'
[ [ [ 691, 700 ] ] ]
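A minimal usage sketch for the Justifier class in the row above; the instance name and the argument lists are illustrative and not part of the original source. init_justs shifts the stored justification state and calc_just maps an index to a Bootstrap-style justify-content class.

# Hypothetical usage of the Justifier class defined in the row above.
j = Justifier()
j.init_justs([3])        # 3 // 3 == 1 -> applies the second offset row once
print(j.calc_just([4]))  # justs[4] + (4 % 3) == 1 -> 'justify-content-center'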
"""A proto buffer based logging system for minitaur experiments. The logging system records the time since reset, base position, orientation, angular velocity and motor information (joint angle, speed, and torque) into a proto buffer. See minitaur_logging.proto for more details. The episode_proto is updated per time step by the environment and saved onto disk for each episode. """ from __future__ import absolute_import from __future__ import division from __future__ import print_function import datetime import os import time import tensorflow as tf import minitaur_logging_pb2 NUM_MOTORS = 8 def _update_base_state(base_state, values): base_state.x = values[0] base_state.y = values[1] base_state.z = values[2] def preallocate_episode_proto(episode_proto, max_num_steps): """Preallocate the memory for proto buffer. Dynamically allocating memory as the protobuf expands causes unexpected delay that is not tolerable with locomotion control. Args: episode_proto: The proto that holds the state/action data for the current episode. max_num_steps: The max number of steps that will be recorded in the proto. The state/data over max_num_steps will not be stored in the proto. """ for _ in range(max_num_steps): step_log = episode_proto.state_action.add() step_log.info_valid = False step_log.time.seconds = 0 step_log.time.nanos = 0 for _ in range(NUM_MOTORS): motor_state = step_log.motor_states.add() motor_state.angle = 0 motor_state.velocity = 0 motor_state.torque = 0 motor_state.action = 0 _update_base_state(step_log.base_position, [0, 0, 0]) _update_base_state(step_log.base_orientation, [0, 0, 0]) _update_base_state(step_log.base_angular_vel, [0, 0, 0]) def update_episode_proto(episode_proto, minitaur, action, step): """Update the episode proto by appending the states/action of the minitaur. Note that the state/data over max_num_steps preallocated (len(episode_proto.state_action)) will not be stored in the proto. Args: episode_proto: The proto that holds the state/action data for the current episode. minitaur: The minitaur instance. See envs.minitaur for details. action: The action applied at this time step. The action is an 8-element numpy floating-point array. step: The current step index. """ max_num_steps = len(episode_proto.state_action) if step >= max_num_steps: tf.logging.warning( "{}th step is not recorded in the logging since only {} steps were " "pre-allocated.".format(step, max_num_steps)) return step_log = episode_proto.state_action[step] step_log.info_valid = minitaur.IsObservationValid() time_in_seconds = minitaur.GetTimeSinceReset() step_log.time.seconds = int(time_in_seconds) step_log.time.nanos = int((time_in_seconds - int(time_in_seconds)) * 1e9) motor_angles = minitaur.GetMotorAngles() motor_velocities = minitaur.GetMotorVelocities() motor_torques = minitaur.GetMotorTorques() for i in range(minitaur.num_motors): step_log.motor_states[i].angle = motor_angles[i] step_log.motor_states[i].velocity = motor_velocities[i] step_log.motor_states[i].torque = motor_torques[i] step_log.motor_states[i].action = action[i] _update_base_state(step_log.base_position, minitaur.GetBasePosition()) _update_base_state(step_log.base_orientation, minitaur.GetBaseRollPitchYaw()) _update_base_state(step_log.base_angular_vel, minitaur.GetBaseRollPitchYawRate()) class MinitaurLogging(object): """A logging system that records the states/action of the minitaur.""" def __init__(self, log_path=None): self._log_path = log_path # TODO(jietan): Consider using recordio to write the logs. 
def save_episode(self, episode_proto): """Save episode_proto to self._log_path. self._log_path is the directory name. A time stamp is the file name of the log file. For example, when self._log_path is "/tmp/logs/", the actual log file would be "/tmp/logs/yyyy-mm-dd-hh:mm:ss". Args: episode_proto: The proto that holds the states/action for the current episode that needs to be save to disk. Returns: The full log path, including the directory name and the file name. """ if not self._log_path or not episode_proto.state_action: return self._log_path if not tf.gfile.Exists(self._log_path): tf.gfile.MakeDirs(self._log_path) ts = time.time() time_stamp = datetime.datetime.fromtimestamp(ts).strftime( "%Y-%m-%d-%H:%M:%S") log_path = os.path.join(self._log_path, "minitaur_log_{}".format(time_stamp)) with tf.gfile.Open(log_path, "w") as f: f.write(episode_proto.SerializeToString()) return log_path def restore_episode(self, log_path): """Restore the episodic proto from the log path. Args: log_path: The full path of the log file. Returns: The minitaur episode proto. """ with tf.gfile.Open(log_path) as f: content = f.read() episode_proto = minitaur_logging_pb2.MinitaurEpisode() episode_proto.ParseFromString(content) return episode_proto
[ [ [ 409, 424 ] ], [ [ 448, 456 ] ], [ [ 480, 494 ] ], [ [ 503, 511 ], [ 4512, 4520 ] ], [ [ 519, 521 ], [ 4602, 4604 ] ], [ [ 529, 533 ], [ 4483, 4487 ] ], [ [ 542, 558 ], [ 2453, 2455 ], [ 4401, 4403 ], [ 4440, 4442 ], [ 4706, 4708 ], [ 5025, 5027 ] ], [ [ 566, 586 ], [ 5102, 5122 ] ], [ [ 588, 598 ], [ 1419, 1429 ] ], [ [ 609, 627 ], [ 1601, 1619 ], [ 1659, 1677 ], [ 1720, 1738 ], [ 3285, 3303 ], [ 3358, 3376 ], [ 3438, 3456 ] ], [ [ 736, 761 ] ], [ [ 1783, 1803 ] ], [ [ 3549, 3564 ] ] ]
# -*- coding: utf-8 -*-

from unittest import TestCase

import six

from popolo_data.importer import Popolo


EXAMPLE_AREA = {
    "id": "area/tartu_linn",
    "identifiers": [
        {
            "identifier": "Q3032626",
            "scheme": "wikidata"
        }
    ],
    "name": "Tartu linn",
    "other_names": [
        {
            "lang": "fr",
            "name": "Dixième circonscription législative d'Estonie",
            "note": "multilingual"
        },
        {
            "lang": "et",
            "name": "Valimisringkond nr 10",
            "note": "multilingual"
        },
        {
            "lang": "en",
            "name": "Electoral District 10 (Tartu)",
            "note": "multilingual"
        }
    ],
    "type": "constituency"
}


class TestAreas(TestCase):

    def test_empty_file_gives_no_areas(self):
        popolo = Popolo({})
        assert len(popolo.areas) == 0

    def test_single_area_with_name(self):
        popolo = Popolo({"areas": [EXAMPLE_AREA]})
        assert len(popolo.areas) == 1
        area = popolo.areas[0]
        assert area.name == 'Tartu linn'

    def test_area_id(self):
        popolo = Popolo({"areas": [EXAMPLE_AREA]})
        area = popolo.areas[0]
        assert area.id == 'area/tartu_linn'

    def test_area_type(self):
        popolo = Popolo({"areas": [EXAMPLE_AREA]})
        area = popolo.areas[0]
        assert area.type == 'constituency'

    def test_area_identifiers(self):
        popolo = Popolo({"areas": [EXAMPLE_AREA]})
        area = popolo.areas[0]
        assert area.identifiers == [
            {
                "identifier": "Q3032626",
                "scheme": "wikidata"
            }
        ]

    def test_area_other_names(self):
        popolo = Popolo({"areas": [EXAMPLE_AREA]})
        area = popolo.areas[0]
        assert area.other_names == [
            {
                "lang": "fr",
                "name": "Dixième circonscription législative d'Estonie",
                "note": "multilingual"
            },
            {
                "lang": "et",
                "name": "Valimisringkond nr 10",
                "note": "multilingual"
            },
            {
                "lang": "en",
                "name": "Electoral District 10 (Tartu)",
                "note": "multilingual"
            }
        ]

    def test_area_wikidata(self):
        popolo = Popolo({"areas": [EXAMPLE_AREA]})
        area = popolo.areas[0]
        assert area.wikidata == 'Q3032626'

    def test_area_repr(self):
        popolo = Popolo({"areas": [EXAMPLE_AREA]})
        area = popolo.areas[0]
        if six.PY2:
            assert repr(area) == b"<Area: Tartu linn>"
        else:
            assert repr(area) == u"<Area: Tartu linn>"

    def test_area_identity_equality_and_inequality(self):
        popolo_a = Popolo({"areas": [EXAMPLE_AREA]})
        area_a = popolo_a.areas[0]
        popolo_b = Popolo({"areas": [EXAMPLE_AREA]})
        area_b = popolo_b.areas[0]
        assert area_a == area_b
        assert not (area_a != area_b)
[ [ [ 46, 54 ], [ 788, 796 ] ], [ [ 63, 66 ], [ 2624, 2627 ] ], [ [ 101, 107 ], [ 863, 869 ], [ 972, 978 ], [ 1162, 1168 ], [ 1319, 1325 ], [ 1482, 1488 ], [ 1756, 1762 ], [ 2392, 2398 ], [ 2548, 2554 ], [ 2835, 2841 ], [ 2923, 2929 ] ], [ [ 110, 122 ], [ 990, 1002 ], [ 1180, 1192 ], [ 1337, 1349 ], [ 1500, 1512 ], [ 1774, 1786 ], [ 2410, 2422 ], [ 2566, 2578 ], [ 2853, 2865 ], [ 2941, 2953 ] ], [ [ 778, 787 ] ] ]
__author__ = 'alvertisjo'

from django.core.serializers import json
import requests
from requests.packages.urllib3 import Timeout
from requests.packages.urllib3.exceptions import ConnectionError


class OpenProductData(object):

    def getData(self):
        # rowStep=100
        # currentPage=0
        # #####documentation: http://pod.opendatasoft.com/api/doc/#doc-datasets-search
        # full_url='http://pod.opendatasoft.com/api/records/1.0/search'
        # http://pod.opendatasoft.com/api/records/1.0/search?dataset=pod_gtin&rows=10&start=11&facet=gpc_s_nm&facet=brand_nm&facet=owner_nm&facet=gln_nm&facet=prefix_nm
        # dataset='pod_gtin'
        # #print(full_url)
        # try:
        #     response = requests.get(full_url, verify=False)
        #     #print response
        #     return response.json()
        # except ConnectionError as e:  # This is the correct syntax
        #     print "error: %s" %e
        #     return response.json()
        # except Timeout as t:  # This is the correct syntax
        #     print "Timeout error: %s" %t
        #     return json.dumps({"error":t.message})
        # except:
        #     return json.dumps([])
        pass

    def readDataFromFile(self):
        pass

    def storeToGraph(self, data):
        # POST http://localhost:7474/db/data/transaction/commit
        # Accept: application/json; charset=UTF-8
        # Content-Type: application/json
        url = 'http://snf-561492.vm.okeanos.grnet.gr:7474/'
        # {
        #   "statements" : [ {
        #     "statement" : "CREATE (n) RETURN id(n)"
        #   } ]
        # }
        pass
[ [ [ 0, 10 ] ], [ [ 63, 67 ] ], [ [ 75, 83 ] ], [ [ 122, 129 ] ], [ [ 179, 194 ] ], [ [ 202, 217 ] ] ]
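The commented-out block in storeToGraph above describes Neo4j's transactional HTTP endpoint. A hedged sketch of what that request could look like with requests; the Cypher statement, headers and localhost URL are taken from the comments, everything else is illustrative and not part of the original source.

# Illustrative only: POST one Cypher statement to Neo4j's transactional endpoint.
import requests

payload = {"statements": [{"statement": "CREATE (n) RETURN id(n)"}]}
response = requests.post(
    "http://localhost:7474/db/data/transaction/commit",
    json=payload,
    headers={"Accept": "application/json; charset=UTF-8"},
)
print(response.json())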
from django.views.generic import TemplateView


class HomePageView(TemplateView):
    template_name = "home.html"
[ [ [ 33, 45 ], [ 66, 78 ] ], [ [ 53, 65 ] ] ]
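For context, a TemplateView subclass like HomePageView above is normally wired into a URLconf. A minimal sketch; the urls.py layout, the import path and the URL name are assumptions, not part of the dataset row.

# Hypothetical urls.py showing how HomePageView might be routed.
from django.urls import path

from .views import HomePageView  # assumes the class above lives in views.py

urlpatterns = [
    path("", HomePageView.as_view(), name="home"),
]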
from . import Simulations
from . import Spacecraft
[ [ [ 14, 25 ] ], [ [ 41, 51 ] ] ]
""" @package: jsonutils @script: test_JsonUtils.py @purpose: Test Suite for JsonUtils. @created: Aug 26, 2017 @author: <B>H</B>ugo <B>S</B>aporetti <B>J</B>unior @mailto: yorevs@hotmail.com @site: https://github.com/yorevs/homesetup @license: Please refer to <https://opensource.org/licenses/MIT> """ import os import unittest from hhslib.security import * PASSPHRASE = '12345' SAMPLE_IN_FILE_NAME = "resources/secret.in" SAMPLE_OUT_FILE_NAME = "resources/secret.out" OUT_FILE = "resources/outfile.out" OUT_FILE_GPG = "resources/outfile.out.gpg" ORIGINAL_FILE_CONTENTS = "HomeSetup Secrets" ENCODED_FILE_CONTENTS = "SG9tZVNldHVwIFNlY3JldHM=" class TestHhsLib(unittest.TestCase): # Setup tests def setUp(self): with open(SAMPLE_IN_FILE_NAME, 'w') as f_in: f_in.write(ORIGINAL_FILE_CONTENTS) with open(SAMPLE_OUT_FILE_NAME, 'w') as f_in: f_in.write(ENCODED_FILE_CONTENTS) # Teardown tests def tearDown(self): if os.path.exists(OUT_FILE): os.remove(OUT_FILE) if os.path.exists(OUT_FILE_GPG): os.remove(OUT_FILE_GPG) # TEST CASES ---------- # TC1 - Test encoding a file. def test_should_encode_file(self): with open(SAMPLE_IN_FILE_NAME, 'r') as f_in: contents = str(f_in.read().strip()) self.assertEquals(ORIGINAL_FILE_CONTENTS, contents) encode(SAMPLE_IN_FILE_NAME, OUT_FILE) with open(OUT_FILE, 'r') as f_out: contents = str(f_out.read().strip()) self.assertEquals(ENCODED_FILE_CONTENTS, contents) # TC2 - Test decoding a file. def test_should_decode_file(self): with open(SAMPLE_OUT_FILE_NAME, 'r') as f_in: contents = str(f_in.read().strip()) self.assertEquals(ENCODED_FILE_CONTENTS, contents) decode(SAMPLE_OUT_FILE_NAME, OUT_FILE) with open(OUT_FILE, 'r') as f_out: contents = str(f_out.read().strip()) self.assertEquals(ORIGINAL_FILE_CONTENTS, contents) # TC3 - Test encrypting a file. def test_should_encrypt_decrypt_file(self): with open(SAMPLE_IN_FILE_NAME, 'r') as f_in: contents = str(f_in.read().strip()) self.assertEquals(ORIGINAL_FILE_CONTENTS, contents) encrypt(SAMPLE_IN_FILE_NAME, OUT_FILE_GPG, PASSPHRASE) decrypt(OUT_FILE_GPG, OUT_FILE, PASSPHRASE) with open(OUT_FILE, 'r') as f_out: contents = str(f_out.read().strip()) self.assertEquals(ORIGINAL_FILE_CONTENTS, contents) # Program entry point. if __name__ == '__main__': suite = unittest.TestLoader().loadTestsFromTestCase(TestHhsLib) unittest.TextTestRunner(verbosity=2).run(suite)
[ [ [ 330, 332 ], [ 1009, 1011 ], [ 1047, 1049 ], [ 1078, 1080 ], [ 1120, 1122 ] ], [ [ 340, 348 ], [ 688, 696 ], [ 2644, 2652 ], [ 2704, 2712 ] ], [ [ 378, 379 ], [ 1424, 1430 ], [ 1864, 1870 ], [ 2317, 2324 ], [ 2380, 2387 ] ], [ [ 381, 391 ], [ 2360, 2370 ], [ 2412, 2422 ] ], [ [ 403, 422 ], [ 766, 785 ], [ 1269, 1288 ], [ 1431, 1450 ], [ 2162, 2181 ], [ 2325, 2344 ] ], [ [ 447, 467 ], [ 866, 886 ], [ 1709, 1729 ], [ 1871, 1891 ] ], [ [ 494, 502 ], [ 1024, 1032 ], [ 1057, 1065 ], [ 1452, 1460 ], [ 1480, 1488 ], [ 1893, 1901 ], [ 1921, 1929 ], [ 2402, 2410 ], [ 2442, 2450 ] ], [ [ 529, 541 ], [ 1093, 1105 ], [ 1130, 1142 ], [ 2346, 2358 ], [ 2388, 2400 ] ], [ [ 573, 595 ], [ 824, 846 ], [ 1382, 1404 ], [ 2025, 2047 ], [ 2275, 2297 ], [ 2546, 2568 ] ], [ [ 618, 639 ], [ 925, 946 ], [ 1584, 1605 ], [ 1823, 1844 ] ], [ [ 677, 687 ], [ 2688, 2698 ] ], [ [ 2636, 2641 ], [ 2745, 2750 ] ] ]
import threading import requests # encryption import Encrypt import unicodedata from ttk import Style, Button, Label, Entry, Progressbar, Checkbutton from Tkinter import Tk, Frame, RIGHT, BOTH, RAISED from Tkinter import TOP, X, N, LEFT from Tkinter import END, Listbox, MULTIPLE from Tkinter import Toplevel, DISABLED from Tkinter import StringVar, Scrollbar from multiprocessing import Queue from random import choice, randint from fbchat import log, client from fbchat.graphql import * # Wrapper for the client class just in case we need to modify client to make it work class GuiClient(client.Client): def __init__(self, email, password, user_agent=None, max_tries=5, session_cookies=None, logging_level=logging.INFO): """ Initializes and logs in the client :param email: Facebook `email`, `id` or `phone number` :param password: Facebook account password :param user_agent: Custom user agent to use when sending requests. If `None`, user agent will be chosen from a premade list (see :any:`utils.USER_AGENTS`) :param max_tries: Maximum number of times to try logging in :param session_cookies: Cookies from a previous session (Will default to login if these are invalid) :param logging_level: Configures the `logging level <https://docs.python.org/3/library/logging.html#logging-levels>`_. Defaults to `INFO` :type max_tries: int :type session_cookies: dict :type logging_level: int :raises: FBchatException on failed login """ self.sticky, self.pool = (None, None) self._session = requests.session() self.req_counter = 1 self.seq = "0" self.payloadDefault = {} self.client = 'mercury' self.default_thread_id = None self.default_thread_type = None self.req_url = ReqUrl() self.most_recent_message = None self.most_recent_messages_queue = Queue() if not user_agent: user_agent = choice(USER_AGENTS) self._header = { 'Content-Type': 'application/x-www-form-urlencoded', 'Referer': self.req_url.BASE, 'Origin': self.req_url.BASE, 'User-Agent': user_agent, 'Connection': 'keep-alive', } handler.setLevel(logging_level) # If session cookies aren't set, not properly loaded or gives us an invalid session, then do the login if not session_cookies or not self.setSession(session_cookies) or not self.isLoggedIn(): self.login(email, password, max_tries) else: self.email = email self.password = password def onMessage(self, author_id, message_object, thread_id, thread_type, **kwargs): self.markAsDelivered(author_id, thread_id) self.markAsRead(author_id) if (message_object is not None): self.most_recent_message = message_object self.most_recent_messages_queue.put(message_object) def stopListening(self): """Cleans up the variables from startListening""" print("Logging off...") self.listening = False self.sticky, self.pool = (None, None) def listen(self, markAlive=True): """ Initializes and runs the listening loop continually :param markAlive: Whether this should ping the Facebook server each time the loop runs :type markAlive: bool """ self.startListening() self.onListening() while self.listening and self.doOneListen(markAlive): pass self.stopListening() class GUI(Frame): """ This is the root window """ def __init__(self, parent, client): self.queue = Queue() # I got sick of filling in the login parameters repeatedly, # for the sake of testing I will leave it like this and clear it before finishing the gui self.email = "" self.password = "" self.name = "" self.parent = parent self.initialized = False self.loadWindow = None self.remember = False self.client = None self.msg_list = None self.changingConvo = False self.loginScreen() def 
centerWindow(self, notself=None): """ This centers the window into place if notself is set, then it centers the notself window @param: notself - TKobject """ if notself is not None: # notself is primarly for progressbar sw = self.parent.winfo_screenwidth() sh = self.parent.winfo_screenheight() x = (sw - self.w / 2) / 2 y = (sh - self.h / 2) / 2 notself.geometry('%dx%d+%d+%d' % (self.w / 1.8, self.h / 1.8, x, y)) else: sw = self.parent.winfo_screenwidth() sh = self.parent.winfo_screenheight() x = (sw - self.w) / 2 y = (sh - self.h) / 2 self.parent.geometry('%dx%d+%d+%d' % (self.w, self.h, x, y)) def startWindow(self): """ This method starts/creates the window for the UI """ Frame.__init__(self, self.parent, background="white") self.style = Style() self.style.theme_use("default") self.pack(fill=BOTH, expand=1) if (not self.initialized): self.centerWindow() else: self.parent.geometry('%dx%d' % (self.w, self.h)) self.initialized = True def resetWindow(self): """ Resets the window """ if (self.initialized): self.destroy() if (self.loadWindow is not None): self.loadWindow.destroy() self.startWindow() def loginScreen(self): """ First screen that user will see, will require Facebook credentials to be inputted """ # Resetting window self.h = 150 self.w = 350 self.resetWindow() self.parent.title("Welcome") # Creating frame that takes in email emailFrame = Frame(self) emailFrame.pack(fill=X, side=TOP) emailLabel = Label(emailFrame, text="Email:", background="white") emailLabel.pack(side=LEFT, padx=15, pady=10) self.emailEntry = Entry(emailFrame, width=30) self.emailEntry.insert(0, self.email) self.emailEntry.pack(side=LEFT, padx=35, pady=10) # Done with email frame # Creating password frame passwordFrame = Frame(self) passwordFrame.pack(fill=X, side=TOP) passwordLabel = Label(passwordFrame, text="Password:", background="white") passwordLabel.pack(side=LEFT, padx=15, pady=10) self.passwordEntry = Entry(passwordFrame, show="*", width=30) self.passwordEntry.bind("<Return>", self.start) self.passwordEntry.insert(0, self.password) self.passwordEntry.pack(side=LEFT, padx=35, pady=10) # Done with password frame # Creating bottom buttons frame = Frame(self, borderwidth=1) frame.pack(fill=BOTH, expand=True) self.pack(fill=BOTH, expand=True) exitButton = Button(self, text="Exit", command=self.parent.destroy) exitButton.pack(side=RIGHT, padx=5, pady=5) self.loginButton = Button(self, text="Log In", command=self.start) self.loginButton.pack(side=RIGHT) # Done with bottom buttons def start(self, opt=""): """ Initiates login, starts loading screen. """ thread1 = ThreadedTask(self.queue, self.login) thread2 = ThreadedTask(self.queue, self.loadingScreen) thread2.start() thread1.start() self.checkThread(thread1, self.chatUI) def loadingScreen(self): """ This starts the loading screen and disables all buttons """ for i in self.winfo_children(): if Button == type(i): i.configure(state=DISABLED) self.loadWindow = Toplevel(self.parent) loadingstring = "Logging in..." 
loadinglabel = Label(self.loadWindow, text=loadingstring, background="white") progressbar = Progressbar(self.loadWindow, orient="horizontal", length=300, mode="indeterminate") progressbar.pack(pady=self.h / 10) loadinglabel.pack() self.centerWindow(self.loadWindow) self.loadWindow.title("Wait") progressbar.start() def login(self): """ Login with the inputted credentials from the loginScreen """ if(self.client is not None): if(self.client.isLoggedIn()): self.client.logout() self.email = self.emailEntry.get() self.password = self.passwordEntry.get() # This will log into Facebook with the given credentials self.client = GuiClient(self.email, self.password) print(self.client._fetchInfo(self.client.uid)[self.client.uid].get('first_name')) self.thread3 = ThreadedTask(self.queue, self.listen) self.thread3.start() def listen(self): """ We start the listening loop """ self.client.listen() def chatUI(self): """ Chat GUI page """ self.h = 350 self.w = 700 self.resetWindow() self.parent.title("Messenger") # We make the chat side of the UI self.right_frame = Frame(self) self.right_frame.pack(side=RIGHT, fill='y') self.messages_frame = Frame(self.right_frame) self.messages_frame.pack(side=TOP) self.my_msg = StringVar() # For messages to be sent. self.my_msg.set("") self.msg_scrollbar = Scrollbar(self.messages_frame) # Navigate through past messages # Following will contain the messages self.msg_list = Listbox(self.messages_frame, height=15, width=50, yscrollcommand=self.msg_scrollbar.set) self.msg_scrollbar.config(command=self.msg_list.yview) self.msg_scrollbar.pack(side=RIGHT, fill='y', padx=5) self.msg_list.pack(side=RIGHT) self.entry_field = Entry(self.right_frame, textvariable=self.my_msg) self.entry_field.bind("<Return>", self.send) self.send_button = Button(self.right_frame, text="Send", command=self.send) self.entry_field.pack(side="top", fill=X, padx=5, pady=5) self.send_button.pack(side="top") self.exitButton = Button(self.right_frame, text="Exit", command=self.exit) self.exitButton.pack(side="bottom", padx=5, pady=5) # We make the the side that contains the other users. 
self.left_frame = Frame(self) self.left_frame.pack(side=LEFT, fill='y') self.usr_scrollbar = Scrollbar(self.left_frame) self.usr_list = Listbox(self.left_frame, height=15, width=50, yscrollcommand=self.usr_scrollbar.set) self.usr_scrollbar.config(command=self.usr_list.yview) self.usr_search_bar = Entry(self.left_frame, textvariable="") self.usr_search_button = Button(self.left_frame, text="Search", command=self.search) self.usr_search_bar.pack(side="top", fill=X, pady=2, padx=1) self.usr_search_button.pack(side="top", fill=X, pady=2, padx=1) self.usr_scrollbar.pack(side=RIGHT, fill='y', padx=5) self.usr_list.pack(side=RIGHT, fill='y') # The user loading logic is in the search function self.search() self.usr_list.bind('<Double-1>', self.changeConvo) def search(self): fresh_users = self.client.fetchAllUsers() self.users = [] if (self.usr_search_bar.get() is not ""): for user in fresh_users: if (self.usr_search_bar.get() in user.name): self.users.append(user) else: self.users = fresh_users if (self.usr_list.size() is not 0): self.usr_list.delete(0, END) for user in self.users: self.usr_list.insert(END, " " + user.name) # By default I would just take the first conversation self.currentUser = self.users[0] # TODO: fix IndexOutOfRange Error when searched for a string not found self.usr_search_bar.delete(0, END) def send(self, _=""): """ Send messages, will send whatever is in the message field and then clear it """ plaintext = self.entry_field.get() key = randint(-60, 60) ciphertext = Encrypt.encrypt(plaintext, key) ciphertext = "{}Q_Q{}".format(key, ciphertext) message = Message(text=unicode(ciphertext, "ascii")) self.client.send(message, self.currentUser.uid) self.entry_field.delete(0, END) self.client.most_recent_message = message self.msg_list.insert(0, self.name + ": " + plaintext) self.msg_list.see(END) def changeConvo(self, param): """ When you click on another user in the chat we update the page """ print("CHANGING CONVO") selectionIndex = self.usr_list.curselection() self.currentUser = self.users[selectionIndex[0]] self.changingConvo = True self.updateConversation() def updateConversation(self): """ Clear the conversation box, reupdate with new conversation, pings facebook server if they got anything """ if (self.changingConvo): # we are changing the conversation/switching users print("[updateConversation] we are changing conversation") messages = self.client.fetchThreadMessages(self.currentUser.uid) self.msg_list.delete(0, END) for message in messages: text = self.decrypt_w_uc(message) self.msg_list.insert(0, self.client._fetchInfo(message.author)[message.author][ "first_name"] + ": " + text) # The message listbox will automatically look at the last/"most recent" message self.msg_list.see(END) # We no longer need to change the conversation self.changingConvo = False else: # same user, but checking for new messages # Sees last message from the message list box last_message = self.msg_list.get(END) if (self.client is not None and self.client.isLoggedIn() and self.client.most_recent_message is not None): msg_object = self.client.most_recent_message msg_author = self.client.most_recent_message.author name = "" if (msg_author is None): msg_author = self.name else: name = self.client._fetchInfo(msg_author)[msg_author]["first_name"] text = self.decrypt_w_uc(msg_object) new_last_message = name + ": " + text if (last_message != new_last_message): # This is checking if were updating the current convo or refreshing convo if (name + ": " in last_message): while 
(self.client.most_recent_messages_queue.empty() is not True): message = self.client.most_recent_messages_queue.get() text = self.decrypt_w_uc(message) self.msg_list.insert(END, self.client._fetchInfo(message.author)[message.author][ "first_name"] + ": " + text) self.msg_list.see(END) else: messages = self.client.fetchThreadMessages(self.currentUser.uid) self.msg_list.delete(0, END) for message in messages: text = self.decrypt_w_uc(message) self.msg_list.insert(0, self.client._fetchInfo(message.author)[message.author][ "first_name"] + ": " + text) self.msg_list.see(END) self.client.most_recent_message = messages[0] def decrypt_w_uc(self, message): """ Decrypt with unicode character check - will decrypt when necessary, and then convert unicode to ascii so TCL won't freak out Input: message -> fbchat.models.Message, Message object Output: clean_text -> String """ clean_text = "" if "Q_Q" in message.text: # to be decrypted key, ciphertext = message.text.split("Q_Q") clean_text = Encrypt.decrypt(ciphertext, int(key)) else: clean_text = message.text # now we do unicode and emoji clean_clean_text = "" for character in clean_text: # if character not in emoji.UNICODE_EMOJI: if type(character) is unicode: clean_clean_text += unicodedata.normalize('NFKD', character).encode('ascii', 'replace') else: clean_clean_text += character return clean_clean_text def exit(self): """ Stops listening and ends GUI """ self.client.stopListening() self.parent.destroy() def checkThread(self, thread, function): """ This function checks to see if the given thread is dead, if it is not, it recalls a new checkThread. After the thread is dead, it calls the given function @param: thread - ThreadedTask functoin - a function """ if thread.is_alive(): self.parent.after(1000, lambda: self.checkThread(thread, function)) else: function() class ThreadedTask(threading.Thread): """ Used for creating a threaded task """ def __init__(self, queue, function): """ Starts the threaded task @param: queue - Queue object function - a function """ threading.Thread.__init__(self) self.queue = queue self.function = function def run(self): """ Runs the function """ self.function() def tk_loop(root, ex): """ Checks for messages every half a second """ if (ex.msg_list is not None): ex.updateConversation() root.after(2000, tk_loop, root, ex) def initiate_tk_loop(root, ex): """ I honestly don't know how to thread this other than doing this terrible piece of code """ root.after(2000, tk_loop, root, ex) def removeEmoji(msg): """ removes non ASCII chars :param msg: :return: new_msg with emjoy char removed """ new_msg = "" for ch in msg: pass return new_msg if __name__ == "__main__": # create GUI root = Tk() root.resizable(width=False, height=False) ex = GUI(root, client) # make calls to api to load GUI with relavent information initiate_tk_loop(root, ex) root.mainloop()
[ [ [ 8, 17 ], [ 17745, 17754 ], [ 18012, 18021 ] ], [ [ 25, 33 ], [ 1619, 1627 ] ], [ [ 55, 62 ], [ 12524, 12531 ], [ 16581, 16588 ] ], [ [ 70, 81 ], [ 16911, 16922 ] ], [ [ 99, 104 ], [ 5243, 5248 ] ], [ [ 106, 112 ], [ 7180, 7186 ], [ 7314, 7320 ], [ 7936, 7942 ], [ 10311, 10317 ], [ 10503, 10509 ], [ 11104, 11110 ] ], [ [ 114, 119 ], [ 6167, 6172 ], [ 6605, 6610 ], [ 8111, 8116 ] ], [ [ 121, 126 ], [ 6300, 6305 ], [ 6750, 6755 ], [ 10181, 10186 ], [ 11031, 11036 ] ], [ [ 128, 139 ], [ 8196, 8207 ] ], [ [ 141, 152 ] ], [ [ 173, 175 ], [ 18824, 18826 ] ], [ [ 177, 182 ], [ 3630, 3635 ], [ 5168, 5173 ], [ 6091, 6096 ], [ 6523, 6528 ], [ 7046, 7051 ], [ 9480, 9485 ], [ 9574, 9579 ], [ 10709, 10714 ] ], [ [ 184, 189 ], [ 7264, 7269 ], [ 7397, 7402 ], [ 9527, 9532 ], [ 10089, 10094 ], [ 10146, 10151 ], [ 11344, 11349 ], [ 11401, 11406 ] ], [ [ 191, 195 ], [ 5314, 5318 ], [ 7097, 7101 ], [ 7139, 7143 ] ], [ [ 197, 203 ] ], [ [ 224, 227 ], [ 6140, 6143 ], [ 6575, 6578 ], [ 9636, 9639 ] ], [ [ 229, 230 ], [ 6132, 6133 ], [ 6567, 6568 ], [ 10415, 10416 ], [ 11215, 11216 ], [ 11287, 11288 ] ], [ [ 232, 233 ] ], [ [ 235, 239 ], [ 6249, 6253 ], [ 6408, 6412 ], [ 6696, 6700 ], [ 6936, 6940 ], [ 10755, 10759 ] ], [ [ 260, 263 ], [ 11982, 11985 ], [ 12053, 12056 ], [ 12289, 12292 ], [ 12763, 12766 ], [ 12906, 12909 ], [ 13689, 13692 ], [ 14048, 14051 ], [ 14311, 14314 ], [ 15380, 15383 ], [ 15548, 15551 ], [ 15716, 15719 ], [ 16043, 16046 ] ], [ [ 265, 272 ], [ 9899, 9906 ], [ 10852, 10859 ] ], [ [ 274, 282 ] ], [ [ 303, 311 ], [ 8026, 8034 ] ], [ [ 313, 321 ], [ 7989, 7997 ] ], [ [ 343, 352 ], [ 9664, 9673 ] ], [ [ 354, 363 ], [ 9762, 9771 ], [ 10801, 10810 ] ], [ [ 392, 397 ], [ 1947, 1952 ], [ 3744, 3749 ] ], [ [ 417, 423 ], [ 2008, 2014 ] ], [ [ 425, 432 ], [ 12486, 12493 ] ], [ [ 452, 455 ] ], [ [ 457, 463 ], [ 596, 602 ], [ 18894, 18900 ] ], [ [ 491, 492 ], [ 718, 725 ], [ 1856, 1862 ], [ 2015, 2026 ], [ 2299, 2306 ], [ 12629, 12636 ], [ 12642, 12649 ], [ 16866, 16873 ] ], [ [ 586, 595 ], [ 8904, 8913 ] ], [ [ 3626, 3629 ], [ 18884, 18887 ] ], [ [ 17732, 17744 ], [ 7559, 7571 ], [ 7614, 7626 ], [ 9054, 9066 ] ], [ [ 18204, 18211 ], [ 18370, 18377 ], [ 18550, 18557 ] ], [ [ 18395, 18411 ], [ 18969, 18985 ] ], [ [ 18575, 18586 ] ], [ [ 18817, 18821 ], [ 18833, 18837 ], [ 18888, 18892 ], [ 18986, 18990 ], [ 19000, 19004 ] ], [ [ 18879, 18881 ], [ 18992, 18994 ] ] ]
from pipdeptree import get_installed_distributions, build_dist_index, construct_tree
from bs4 import BeautifulSoup
from json import dump, load
from urllib.request import urlretrieve
from pathlib import Path
from unittest import mock
from pkginfo import SDist
from johnnydep.cli import JohnnyDist
import requests
import setuptools
import tarfile


def read_packages():
    with open("python_packages_list.json", "r") as f:
        package_info = load(f)
    return package_info


def download(download_link, output_folder, package_name, version):
    url = download_link
    dst = Path(output_folder).joinpath("{}_{}.tar.gz".format(package_name, version))
    urlretrieve(url, dst)


def get_packages(package_name):
    """
    Note that package name already starts with /simple/
    This downloader only focuses on .tar.gz files
    :param package_name:
    :return:
    """
    url = "https://pypi.org{}".format(package_name)
    r = requests.get(url)
    soup = BeautifulSoup(r.content, features='html.parser')
    tar = 0
    for id, link in enumerate(soup.find_all('a', href=True)):
        if ".tar.gz" in link["href"]:
            download(link["href"], "downloaded_packages/tar", package_name.split("/")[-2], id)
            tar += 1
    return {"tar": tar}


def extract_info_from_setup():
    with mock.patch.object(setuptools, 'setup') as mock_setup:
        import data_collector.downloaded_packages.setup
        args, kwargs = mock_setup.call_args
        print(kwargs)


def unpack(package_name):
    with tarfile.open(package_name, mode="r:gz") as tf:
        tf.extractall()


def parse(pkg_info):
    mypackage = SDist(pkg_info)
    return PackageInfo(version=mypackage.version, author=mypackage.author_email,
                       license=mypackage.license, name=mypackage.name,
                       maintainer=mypackage.maintainer_email,
                       additional_details=mypackage.__dict__)


def get_dependencies(package):
    url = 'https://pypi.org/pypi/{}/json'
    json = requests.get(url.format(package)).json()
    print(json.keys())
    # return json


def get_johnny_dep(package):
    dist = JohnnyDist(package, index_url=None, env=None, extra_index_url=None)
    return dist.serialise(fields=["name", "requires", "required_by", "project_name", "versions_available"],
                          format=None, recurse=True)


if __name__ == '__main__':
    # get_package_list()
    # get_packages("/simple/jupyter/")
    # unpack("downloaded_packages/tar/jupyter_1.tar.gz")
    # extract_info_from_setup()
    # print(parse("downloaded_packages/tar/jupyter_1.tar.gz").dump_details())
    # print(pkg_resources.get_distribution("downloaded_packages/tar/jupyter_1.tar.gz"))
    print(get_dependencies("pandas"))
    # print(get_johnny_dep("ipython"))
[ [ [ 23, 50 ] ], [ [ 52, 68 ] ], [ [ 70, 84 ] ], [ [ 101, 114 ], [ 967, 980 ] ], [ [ 132, 136 ] ], [ [ 138, 142 ], [ 446, 450 ] ], [ [ 170, 181 ], [ 661, 672 ] ], [ [ 202, 206 ], [ 582, 586 ] ], [ [ 228, 232 ], [ 1312, 1316 ] ], [ [ 253, 258 ], [ 1628, 1633 ] ], [ [ 285, 295 ], [ 2131, 2141 ] ], [ [ 304, 312 ], [ 938, 946 ], [ 2007, 2015 ] ], [ [ 320, 330 ], [ 1330, 1340 ] ], [ [ 338, 345 ], [ 1518, 1525 ] ], [ [ 352, 365 ] ], [ [ 485, 493 ], [ 1141, 1149 ] ], [ [ 689, 701 ] ], [ [ 1276, 1299 ] ], [ [ 1487, 1493 ] ], [ [ 1595, 1600 ] ], [ [ 1927, 1943 ], [ 2692, 2708 ] ], [ [ 2095, 2109 ] ] ]
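get_dependencies above fetches the PyPI JSON metadata but only prints the top-level keys. The declared dependencies normally live under info.requires_dist; a hedged sketch of extracting them follows, where the function name and the handling of the null case are illustrative, not part of the original source.

# Illustrative: pull requires_dist out of PyPI's JSON API response.
import requests

def get_requires_dist(package):
    data = requests.get('https://pypi.org/pypi/{}/json'.format(package)).json()
    # requires_dist can be null for packages with no declared dependencies
    return data['info'].get('requires_dist') or []

print(get_requires_dist('pandas'))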
#!/usr/local/bin/python3.6

# Substitute average testing value from optimization

import sys

if len(sys.argv) != 2:
    sys.stderr.write('Usage: python3.X %s sy_fraction\n' % sys.argv[0])
    raise SystemExit(1)

sy_fraction = sys.argv[1]

parameters = []
with open('input_data/infection_parameters.txt','r') as fin:
    parameters = fin.readlines()

for ind, param in enumerate(parameters):
    if 'average fraction to get tested' in param:
        parameters[ind+1] = str(sy_fraction) + '\n'
        break

with open('input_data/infection_parameters.txt','w') as fout:
    fout.writelines(parameters)
[ [ [ 88, 91 ], [ 100, 103 ], [ 117, 120 ], [ 172, 175 ], [ 221, 224 ] ], [ [ 207, 218 ], [ 455, 466 ] ], [ [ 234, 244 ] ], [ [ 306, 309 ], [ 325, 328 ] ], [ [ 312, 322 ], [ 369, 379 ], [ 431, 441 ], [ 563, 573 ] ], [ [ 346, 349 ], [ 442, 445 ] ], [ [ 350, 355 ], [ 422, 427 ] ], [ [ 540, 544 ], [ 547, 551 ] ] ]
""" Simple functions to manipulate strings """ # Replace the functions below with your implementation as described in the assignment def is_rhyme(word1, word2, k): """ Returns True if the last k letters of the two words are the same (case sensitive). Automatically returns False if either word contains less than k letters. """ if (k == 0): # Cannot compare if k is 0 return False if (len(word1) < k or len(word2) < k): # Return False if either word return False # contains less than k letters rev_word1 = word1[::-1] # Reverse word1 rev_word2 = word2[::-1] # Reverse word2 # Compare first k chars of reversed word1 and reversed word2 # Equivalent of comparing last k chars of word 1 and word2 return rev_word1[:k] == rev_word2[:k] # Test Cases # print(is_rhyme("hello", "world", 7)) # False # print(is_rhyme("hello", "llo", 3)) # True # print(is_rhyme("ello", "ello", 0)) # False # print(is_rhyme("elo", "ello", 3)) # False # print(is_rhyme("ello", "ello", 4)) # True # print(is_rhyme("ello", "ello", 5)) # False
[ [ [ 140, 148 ] ] ]
import sys

if len(sys.argv) != 2:
    print("Usage: python3 orgmybmarks.py bookmarks.html")
    quit()

file = open(sys.argv[1])
# file = open("in.html")
fileout = open("out.html", "w")
hreflist = []

# read up to the first link
numm = 1
while True:
    line = file.readline()
    if not line:
        break
    if line.find("HREF") == -1:
        continue
    num = line.find("HREF")
    href = line[num + 6:]
    num = href.find("\"")
    href = href[:num]
    hreflist.append(href)
    print("%d now %s" % (numm, href))
    numm += 1

numbef = len(hreflist)
hreflist = list(set(hreflist))  # deduplicate
numaft = len(hreflist)

fir = '''<!DOCTYPE NETSCAPE-Bookmark-file-1>
<!-- This is an automatically generated file.
     It will be read and overwritten.
     DO NOT EDIT! -->
<META HTTP-EQUIV="Content-Type" CONTENT="text/html; charset=UTF-8">
<TITLE>Bookmarks</TITLE>
<H1>Bookmarks</H1>
<DL><p>
    <DT><H3 ADD_DATE="1530070116" LAST_MODIFIED="1532090715" PERSONAL_TOOLBAR_FOLDER="true">书签栏</H3>
    <DL><p>
'''
fileout.write(fir)

for i in range(len(hreflist)):
    sec = "        <DT><A HREF=\"%s\">%d</A>\n" % (hreflist[i], i)
    fileout.write(sec)

end = '''    </DL><p>
</DL><p>'''
fileout.write(end)

file.close()
fileout.close()
print("finished! now you have %d bookmarks, %d duplicated bookmarks deleted!" % (numaft, numbef - numaft))
[ [ [ 7, 10 ], [ 19, 22 ], [ 116, 119 ] ], [ [ 104, 108 ], [ 242, 246 ], [ 1182, 1186 ] ], [ [ 154, 161 ], [ 989, 996 ], [ 1110, 1117 ], [ 1163, 1170 ], [ 1195, 1202 ] ], [ [ 186, 194 ], [ 444, 452 ], [ 531, 539 ], [ 561, 569 ] ], [ [ 210, 214 ], [ 491, 495 ], [ 508, 512 ] ], [ [ 235, 239 ], [ 269, 273 ], [ 296, 300 ], [ 348, 352 ], [ 377, 381 ] ], [ [ 342, 345 ], [ 382, 385 ] ], [ [ 370, 374 ], [ 402, 406 ], [ 429, 433 ] ], [ [ 396, 399 ], [ 435, 438 ] ], [ [ 422, 426 ], [ 460, 464 ], [ 497, 501 ] ], [ [ 518, 524 ], [ 1300, 1306 ] ], [ [ 541, 549 ], [ 591, 599 ], [ 1027, 1035 ], [ 1090, 1098 ] ], [ [ 578, 584 ], [ 1292, 1298 ], [ 1309, 1315 ] ], [ [ 601, 604 ], [ 1003, 1006 ] ], [ [ 1012, 1013 ], [ 1099, 1100 ], [ 1103, 1104 ] ], [ [ 1043, 1046 ], [ 1124, 1127 ] ], [ [ 1129, 1132 ], [ 1177, 1180 ] ] ]
import main

def test_execute():
    count = main.results
    assert count == 400410
[ [ [ 7, 11 ], [ 45, 49 ] ], [ [ 17, 29 ] ] ]
import numpy as np import os import sys import matplotlib.pyplot as plt import chainconsumer from math import ceil # pyburst from . import mcmc_versions from . import mcmc_tools from . import burstfit from . import mcmc_params from pyburst.observations import obs_tools from pyburst.plotting import plot_tools from pyburst.grids.grid_strings import get_source_path, print_warning from pyburst.misc.pyprint import printv GRIDS_PATH = os.environ['KEPLER_GRIDS'] def default_plt_options(): """Initialise default plot parameters""" params = {'mathtext.default': 'regular', 'font.family': 'serif', 'text.usetex': False} plt.rcParams.update(params) default_plt_options() def save_plot(fig, prefix, save, source, version, display, chain=None, n_dimensions=None, n_walkers=None, n_steps=None, label=None, extension='.png', enforce_chain_info=True): """Handles saving/displaying of a figure passed to it """ if enforce_chain_info and (None in (n_dimensions, n_walkers, n_steps)): if chain is None: raise ValueError('Must provide chain, or specify each of ' '(n_dimensions, n_walkers, n_steps)') else: n_walkers, n_steps, n_dimensions = chain.shape if save: filename = mcmc_tools.get_mcmc_string(source=source, version=version, n_walkers=n_walkers, n_steps=n_steps, prefix=prefix, label=label, extension=extension) source_path = get_source_path(source) filepath = os.path.join(source_path, 'plots', prefix, f'{filename}') fig.savefig(filepath) if display: plt.show(block=False) else: plt.close(fig) def save_multiple_synth(series, source, version, n_steps, discard, n_walkers=960, walkers=True, posteriors=True, contours=False, display=False, mass_radius=True, synth=True, compressed=False): """Save plots for multiple series in a synthetic data batch """ # TODO reuse max_lhood point default_plt_options() for ser in series: if synth: full_source = f'{source}_{ser}' else: full_source = source chain = mcmc_tools.load_chain(full_source, n_walkers=n_walkers, n_steps=n_steps, version=version, compressed=compressed) if walkers: plot_walkers(chain, source=full_source, save=True, display=display, version=version) if posteriors: plot_posteriors(chain, source=full_source, save=True, discard=discard, display=display, version=version) if contours: plot_contours(chain, source=full_source, save=True, discard=discard, display=display, version=version) if mass_radius: plot_mass_radius(chain, source=full_source, save=True, discard=discard, display=display, version=version) def save_all_plots(source, version, discard, n_steps, n_walkers=1000, display=False, save=True, cap=None, posteriors=True, contours=True, redshift=True, mass_radius=True, verbose=True, compressed=False): """Saves (and/or displays) main MCMC plots """ chain = mcmc_tools.load_chain(source, version=version, n_steps=n_steps, n_walkers=n_walkers, verbose=verbose, compressed=compressed) if posteriors: printv('Plotting posteriors', verbose=verbose) plot_posteriors(chain, source=source, save=save, discard=discard, cap=cap, display=display, version=version) if contours: printv('Plotting contours', verbose=verbose) plot_contours(chain, source=source, save=save, discard=discard, cap=cap, display=display, version=version) if mass_radius: printv('Plotting mass-radius', verbose=verbose) plot_mass_radius(chain, source=source, save=save, discard=discard, cap=cap, display=display, version=version) if redshift: printv('Plotting redshift', verbose=verbose) plot_redshift(chain, source=source, save=save, discard=discard, cap=cap, display=display, version=version) def plot_contours(chain, discard, 
source, version, cap=None, display=True, save=False, truth_values=None, parameters=None, sigmas=np.linspace(0, 2, 5), cc=None, summary=False, fontsize=14, max_ticks=4): """Plots posterior contours of mcmc chain parameters : [str] specify which parameters to plot """ default_plt_options() if cc is None: pkeys = mcmc_versions.get_parameter(source, version, 'param_keys') pkey_labels = plot_tools.convert_mcmc_labels(param_keys=pkeys) cc = mcmc_tools.setup_chainconsumer(chain=chain, param_labels=pkey_labels, discard=discard, cap=cap, sigmas=sigmas, summary=summary, fontsize=fontsize, max_ticks=max_ticks) if parameters is not None: parameters = plot_tools.convert_mcmc_labels(param_keys=parameters) # TODO: figsize if truth_values is not None: fig = cc.plotter.plot(truth=truth_values, parameters=parameters) else: fig = cc.plotter.plot(parameters=parameters) save_plot(fig, prefix='contours', chain=chain, save=save, source=source, version=version, display=display) return fig def plot_posteriors(chain, discard, source, version, cap=None, display=True, save=False, truth_values=None, cc=None): """Plots posterior distributions of mcmc chain truth_values : list|dict Specify parameters of point (e.g. the true value) to draw on the distributions. """ default_plt_options() pkeys = mcmc_versions.get_parameter(source, version, 'param_keys') pkey_labels = plot_tools.convert_mcmc_labels(param_keys=pkeys) if cc is None: cc = mcmc_tools.setup_chainconsumer(chain=chain, param_labels=pkey_labels, discard=discard, cap=cap) height = 3 * ceil(len(pkeys) / 4) if truth_values is not None: fig = cc.plotter.plot_distributions(figsize=[10, height], truth=truth_values) else: fig = cc.plotter.plot_distributions(figsize=[10, height]) plt.tight_layout() save_plot(fig, prefix='posteriors', chain=chain, save=save, source=source, version=version, display=display) return fig def plot_mass_radius(chain, discard, source, version, cap=None, display=True, save=False, summary=False, sigmas=np.linspace(0, 2, 5), fontsize=18, figsize='column'): """Plots contours of mass versus radius from a given chain """ default_plt_options() mass_nw, mass_gr = mcmc_params.get_constant_masses(source, version) mass_radius_chain = mcmc_params.get_mass_radius_chain(chain=chain, discard=discard, source=source, version=version, cap=cap, mass_nw=mass_nw, mass_gr=mass_gr) cc = mcmc_tools.setup_custom_chainconsumer(mass_radius_chain, parameters=['R', 'M'], sigmas=sigmas, summary=summary, fontsize=fontsize) fig = cc.plotter.plot(figsize=figsize) fig.subplots_adjust(left=0.16, bottom=0.15) save_plot(fig, prefix='mass-radius', chain=chain, save=save, source=source, version=version, display=display) return fig def plot_redshift(chain, discard, source, version, cap=None, display=True, save=False): """Plots posterior distribution of redshift given a chain """ mass_nw, mass_gr = mcmc_params.get_constant_masses(source, version) redshift_chain = mcmc_params.get_redshift_chain(chain=chain, discard=discard, source=source, version=version, cap=cap, mass_nw=mass_nw, mass_gr=mass_gr) cc = mcmc_tools.setup_custom_chainconsumer(redshift_chain, parameters=['1+z']) fig = cc.plotter.plot_distributions(figsize=[5, 5]) plt.tight_layout() save_plot(fig, prefix='redshift', chain=chain, save=save, source=source, version=version, display=display) return fig def plot_gravitational_contours(chain, discard, source, version, cap=None, display=True, save=False, r_nw=10, sigmas=np.linspace(0, 2, 5), summary=False, unit_labels=True, fontsize=16, fixed_grav=False, figsize=None): """Plots 
contours of gravitational parameters """ cc = mcmc_tools.setup_gravitational_chainconsumer(chain=chain, discard=discard, source=source, version=version, cap=cap, fixed_grav=fixed_grav, summary=summary, r_nw=r_nw, unit_labels=unit_labels, sigmas=sigmas, fontsize=fontsize) if fixed_grav: fig = cc.plotter.plot_distributions(figsize=figsize) plt.tight_layout() else: fig = cc.plotter.plot() save_plot(fig, prefix='gravitational', chain=chain, save=save, source=source, version=version, display=display) return fig def plot_inclination(chain, discard, source, version, cap=None, display=True, save=False, disc_model='he16_a', sigmas=np.linspace(0, 2, 5), summary=False, unit_labels=True, figsize=(4, 4), fontsize=18): """Plots contours of parameters derived using disc model """ disc_chain = mcmc_params.get_disc_chain(chain=chain, discard=discard, cap=cap, source=source, version=version, disc_model=disc_model) cc = mcmc_tools.setup_custom_chainconsumer(disc_chain, parameters=['d', 'i'], sigmas=sigmas, summary=summary, unit_labels=unit_labels, fontsize=fontsize) fig = cc.plotter.plot(figsize=figsize) fig.subplots_adjust(left=0.15, bottom=0.15) save_plot(fig, prefix='disc', chain=chain, save=save, source=source, version=version, display=display) return fig def plot_distance_anisotropy(chain, discard, source, version, cap=None, display=True, save=False, sigmas=np.linspace(0, 2, 5), summary=False, figsize=(4, 4), unit_labels=True, fontsize=18): """Plots contours of MCMC parameters d_b, xi_ratio """ d_b_chain = mcmc_params.get_param_chain(chain, param='d_b', discard=discard, source=source, version=version, cap=cap) xi_ratio_chain = mcmc_params.get_param_chain(chain, param='xi_ratio', discard=discard, source=source, version=version, cap=cap) flat_chain = np.column_stack([d_b_chain, xi_ratio_chain]) cc = mcmc_tools.setup_custom_chainconsumer(flat_chain, parameters=['d_b', 'xi_ratio'], sigmas=sigmas, summary=summary, unit_labels=unit_labels, fontsize=fontsize) fig = cc.plotter.plot(figsize=figsize) fig.subplots_adjust(left=0.2, bottom=0.2) save_plot(fig, prefix='distance', chain=chain, save=save, source=source, version=version, display=display) return fig def plot_xedd(chain, discard, source, version, cap=None, display=True, save=False, cloud=True, sigmas=np.linspace(0, 2, 10), figsize=(5, 5)): """Plots posterior for Eddington hydrogen composition (X_Edd) """ default_plt_options() xedd_chain = mcmc_params.get_xedd_chain(chain=chain, discard=discard, source=source, version=version, cap=cap) label = plot_tools.quantity_label('xedd') cc = mcmc_tools.setup_custom_chainconsumer(xedd_chain, parameters=[label], sigmas=sigmas, cloud=cloud) fig = cc.plotter.plot(figsize=figsize) save_plot(fig, prefix='xedd', chain=chain, save=save, source=source, version=version, display=display) return fig def plot_walkers(chain, source, version, params=None, n_lines=30, xlim=-1, display=True, save=False, label=''): """Plots walkers vs steps (i.e. "time") Parameters ---------- source : str version : int chain : np.array chain as returned by load_chain() params : [str] parameter(s) of which to plot walkers. n_lines : int approx number of lines/walkers to plot on parameter xlim : int x-axis limit to plot (n_steps), i.e. 
ax.set_xlim((0, xlim)) label : str optional label to add to filename when saving display : bool save : bool """ default_plt_options() pkeys = mcmc_versions.get_parameter(source, version, 'param_keys') # ===== Default to splitting all params into 2 plots ===== if params is None: half = int(len(pkeys) / 2) for i, param_split in enumerate((pkeys[:half], pkeys[half:])): plot_walkers(chain=chain, source=source, version=version, params=param_split, n_lines=n_lines, xlim=xlim, display=display, save=save, label=f'P{i + 1}') return n_walkers, n_steps, n_dim = chain.shape n_params = len(params) jump_size = round(n_walkers / n_lines) steps = np.arange(n_steps) walker_idxs = np.arange(0, n_walkers, jump_size) # noinspection PyTypeChecker fig, ax = plt.subplots(n_params, 1, sharex=True, figsize=(10, 12)) for i in range(n_params): p_idx = pkeys.index(params[i]) for j in walker_idxs: walker = chain[j, :, p_idx] ax[i].plot(steps, walker, linewidth=0.5, color='black') ax[i].set_ylabel(params[i]) if xlim == -1: xlim = n_steps ax[-1].set_xlabel('Step') ax[-1].set_xlim([0, xlim]) plt.tight_layout() if display: plt.show(block=False) save_plot(fig, prefix='walkers', chain=chain, save=save, source=source, version=version, display=display, label=label, extension='.png') def plot_qb_mdot(chain, source, version, discard, cap=None, display=True, save=False, figsize=(5, 5), fontsize=16, sigmas=(1, 2)): """Plots 2D contours of Qb versus Mdot for each epoch (from multi-epoch chain) """ mv = mcmc_versions.McmcVersion(source=source, version=version) chain_flat = mcmc_tools.slice_chain(chain, discard=discard, cap=cap, flatten=True) system_table = obs_tools.load_summary(mv.system) epochs = list(system_table.epoch) cc = chainconsumer.ChainConsumer() param_labels = [] for param in ['mdot', 'qb']: param_labels += [plot_tools.full_label(param)] for i, epoch in enumerate(epochs): mdot_idx = mv.param_keys.index(f'mdot{i + 1}') qb_idx = mv.param_keys.index(f'qb{i + 1}') param_idxs = [mdot_idx, qb_idx] cc.add_chain(chain_flat[:, param_idxs], parameters=param_labels, name=str(epoch)) cc.configure(kde=False, smooth=0, label_font_size=fontsize, tick_font_size=fontsize-2, sigmas=sigmas) fig = cc.plotter.plot(display=False, figsize=figsize) fig.subplots_adjust(left=0.2, bottom=0.2) save_plot(fig, prefix='qb', save=save, source=source, version=version, display=display, chain=chain) return fig def plot_epoch_posteriors(master_cc, source, version, display=True, save=False, col_wrap=None, alt_params=True, unit_labels=True, add_text=True, fontsize=16): """Plot posteriors for multiiple epoch chains parameters ---------- master_cc : ChainConsumer Contains the multi-epoch chain, created with setup_master_chainconsumer() source : str version : int display : bool (optional) save : bool (optional) col_wrap : int (optional) """ param_order = { 'grid5': ['mdot1', 'mdot2', 'mdot3', 'qb1', 'qb2', 'qb3', 'x', 'z', 'm_nw', 'm_gr', 'd_b', 'xi_ratio'], 'he2': ['mdot1', 'mdot2', 'qb1', 'qb2', 'm_gr', 'd_b', 'xi_ratio'], } param_keys = param_order[source] # TODO: xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx # quick and dirty patch! 
if alt_params: param_keys = ['mdot1', 'mdot2', 'mdot3', 'qb1', 'qb2', 'qb3', 'x', 'z', 'g', 'M', 'd_b', 'xi_ratio'] # TODO: xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx formatted_params = plot_tools.convert_mcmc_labels(param_keys, unit_labels=unit_labels) n_epochs = len(master_cc.chains) - 1 if col_wrap is None: col_wrap = n_epochs height = 3 * ceil(len(param_keys) / n_epochs) fig = master_cc.plotter.plot_distributions(parameters=formatted_params, col_wrap=col_wrap, figsize=[8, height], display=False) if add_text: add_epoch_text(fig, fontsize=fontsize) plt.tight_layout() save_plot(fig, prefix='multi_posteriors', save=save, source=source, version=version, display=display, enforce_chain_info=False) return fig def plot_max_lhood(source, version, n_walkers, n_steps, verbose=True, re_interp=False, display=True, save=False): default_plt_options() max_params, max_lhood = mcmc_tools.get_max_lhood_params(source, version=version, n_walkers=n_walkers, n_steps=n_steps, verbose=verbose, return_lhood=True) bfit = burstfit.BurstFit(source=source, version=version, verbose=False, re_interp=re_interp) lhood, fig = bfit.lhood(max_params, plot=True) if lhood != max_lhood: print_warning(f'lhoods do not match (original={max_lhood:.2f}, current={lhood:.2f}). ' + 'BurstFit (e.g. lhood, lnhood) or interpolator may have changed') save_plot(fig, prefix='compare', n_dimensions=len(max_params), n_walkers=n_walkers, n_steps=n_steps, save=save, source=source, version=version, display=display) def plot_bprop_sample(bp_sample, source, version, bprops=None, legend=True, subplot_figsize=(3, 2.5), bfit=None, fontsize=14, vlines=True): """Plot burst properties from large sample against observations bprop_sample : np.array obtained using mcmc_tools.bprop_sample() """ if bfit is None: bfit = burstfit.BurstFit(source=source, version=version, verbose=False) if bprops is None: bprops = bfit.mcmc_version.bprops cc = mcmc_tools.setup_bprop_chainconsumer(chain=None, n=None, discard=None, source=source, version=version, bp_sample=bp_sample) bp_summary = mcmc_tools.extract_bprop_summary(cc, source=source, version=version) n_bprops = len(bprops) n_rows = int(np.ceil(n_bprops / 2)) n_cols = {False: 1, True: 2}.get(n_bprops > 1) figsize = (n_cols * subplot_figsize[0], n_rows * subplot_figsize[1]) fig, ax = plt.subplots(n_rows, n_cols, sharex=False, figsize=figsize) if n_bprops % 2 == 1 and n_bprops > 1: # blank odd-numbered subplot ax[-1, -1].axis('off') for i, bprop in enumerate(bprops): subplot_row = int(np.floor(i / 2)) subplot_col = i % 2 if n_cols > 1: axis = ax[subplot_row, subplot_col] else: axis = ax u_model = np.diff(bp_summary[:, :, i], axis=0) bfit.plot_compare(model=bp_summary[1, :, i], u_model=u_model, bprop=bprop, fontsize=fontsize, ax=axis, display=False, vlines=vlines, legend=True if (i == 0 and legend) else False, xlabel=True if (i in [n_bprops-1, ]) else False) fig.subplots_adjust(wspace=0.4) plt.show(block=False) return fig def plot_autocorrelation(chain, source, version, n_points=10, load=True, save_tau=True, ylims=None): """Plots estimated integrated autocorrelation time Note: Adapted from https://dfm.io/posts/autocorr/ """ mv = mcmc_versions.McmcVersion(source=source, version=version) params_fmt = plot_tools.convert_mcmc_labels(mv.param_keys) if load: sample_steps, autoc = mcmc_tools.load_autocorrelation(source, version=version, n_steps=chain.shape[1]) else: sample_steps, autoc = mcmc_tools.get_autocorrelation(chain, source=source, version=version, n_points=n_points, save=save_tau) fig, ax = plt.subplots() for i, param in 
enumerate(mv.param_keys): ax.loglog(sample_steps, autoc[i], "o-", label=rf"{params_fmt[i]}") ax.plot(sample_steps, sample_steps / 10.0, "--k", label=r"$\tau = N/10$") if ylims is None: xlim = ax.get_xlim() ylims = [5, xlim[1] / 10] ax.set_ylim(ylims) ax.set_xlabel("N steps") ax.set_ylabel(r"$\tau$ estimate (N)") ax.legend(fontsize=14, ncol=2, labelspacing=0.3) plt.show(block=False) return fig def add_epoch_text(fig, fontsize, epochs=(1998, 2000, 2007), colours=('C0', 'C2', 'C3')): """Adds text of epoch to figure subplots """ for i, epoch in enumerate(epochs): ax = fig.axes[i] ax.text(0.95, 0.95, str(epoch), color=colours[i], fontsize=fontsize, transform=ax.transAxes, va='top', ha='right')
[ [ [ 7, 18 ], [ 4714, 4716 ], [ 7139, 7141 ], [ 9169, 9171 ], [ 10333, 10335 ], [ 11350, 11352 ], [ 12546, 12548 ], [ 11892, 11894 ], [ 14519, 14521 ], [ 14556, 14558 ], [ 20411, 20413 ], [ 20804, 20806 ], [ 20974, 20976 ] ], [ [ 26, 28 ], [ 435, 437 ], [ 1678, 1680 ] ], [ [ 36, 39 ] ], [ [ 47, 71 ], [ 659, 662 ], [ 1791, 1794 ], [ 1831, 1834 ], [ 6822, 6825 ], [ 8858, 8861 ], [ 9985, 9988 ], [ 14639, 14642 ], [ 15054, 15057 ], [ 15098, 15101 ], [ 18251, 18254 ], [ 20573, 20576 ], [ 21393, 21396 ], [ 22338, 22341 ], [ 22792, 22795 ] ], [ [ 79, 92 ], [ 15786, 15799 ] ], [ [ 110, 114 ], [ 6556, 6560 ], [ 17877, 17881 ] ], [ [ 140, 153 ], [ 4986, 4999 ], [ 6241, 6254 ], [ 13908, 13921 ], [ 15540, 15553 ], [ 21689, 21702 ] ], [ [ 168, 178 ], [ 1329, 1339 ], [ 2393, 2403 ], [ 3502, 3512 ], [ 5129, 5139 ], [ 6399, 6409 ], [ 7709, 7719 ], [ 8724, 8734 ], [ 9401, 9411 ], [ 10744, 10754 ], [ 11946, 11956 ], [ 12901, 12911 ], [ 15615, 15625 ], [ 18621, 18631 ], [ 20064, 20074 ], [ 20297, 20307 ], [ 21854, 21864 ], [ 22037, 22047 ] ], [ [ 193, 201 ], [ 19003, 19011 ], [ 19923, 19931 ] ], [ [ 216, 227 ], [ 7313, 7324 ], [ 7386, 7397 ], [ 8352, 8363 ], [ 8422, 8433 ], [ 10525, 10536 ], [ 11543, 11554 ], [ 11714, 11725 ], [ 12703, 12714 ] ], [ [ 261, 270 ], [ 15705, 15714 ] ], [ [ 300, 310 ], [ 5067, 5077 ], [ 5481, 5491 ], [ 6318, 6328 ], [ 12858, 12868 ], [ 15897, 15907 ], [ 17696, 17706 ], [ 21764, 21774 ] ], [ [ 350, 365 ], [ 1635, 1650 ] ], [ [ 367, 380 ], [ 19176, 19189 ] ], [ [ 414, 420 ], [ 3722, 3728 ], [ 3936, 3942 ], [ 4147, 4153 ], [ 4364, 4370 ] ], [ [ 422, 432 ] ], [ [ 468, 487 ], [ 689, 708 ], [ 2222, 2241 ], [ 4928, 4947 ], [ 6207, 6226 ], [ 7268, 7287 ], [ 12664, 12683 ], [ 13874, 13893 ], [ 18571, 18590 ] ], [ [ 717, 726 ], [ 5730, 5739 ], [ 6845, 6854 ], [ 8030, 8039 ], [ 8882, 8891 ], [ 10051, 10060 ], [ 11082, 11091 ], [ 12292, 12301 ], [ 13094, 13103 ], [ 15125, 15134 ], [ 16458, 16467 ], [ 18275, 18284 ], [ 19358, 19367 ] ], [ [ 1852, 1871 ] ], [ [ 3197, 3211 ] ], [ [ 4552, 4565 ], [ 2890, 2903 ], [ 3989, 4002 ] ], [ [ 5872, 5887 ], [ 2723, 2738 ], [ 3777, 3792 ] ], [ [ 6989, 7005 ], [ 3056, 3072 ], [ 4203, 4219 ] ], [ [ 8175, 8188 ], [ 4417, 4430 ] ], [ [ 9024, 9051 ] ], [ [ 10198, 10214 ] ], [ [ 11220, 11244 ] ], [ [ 12434, 12443 ] ], [ [ 13232, 13244 ], [ 2577, 2589 ], [ 14173, 14185 ] ], [ [ 15296, 15308 ] ], [ [ 16594, 16615 ] ], [ [ 18438, 18452 ] ], [ [ 19553, 19570 ] ], [ [ 21436, 21456 ] ], [ [ 22836, 22850 ], [ 18207, 18221 ] ] ]
import socket
import time

from xmlrpclib_to import ServerProxy

import httpretty
import pytest

XML_RESPONSE = """<?xml version="1.0"?>
<methodResponse>
<params>
<param>
<value><string>Test</string></value>
</param>
</params>
</methodResponse>"""


def timeout(request, url, headers):
    time.sleep(1)
    return 200, headers, XML_RESPONSE


@httpretty.activate
def test_timeout():
    httpretty.register_uri(
        httpretty.POST, 'http://example.com/RPC2',
        content_type='text/xml',
        body=timeout
    )
    proxy = ServerProxy('http://example.com', timeout=0.5)
    with pytest.raises(socket.timeout):
        proxy.test()


@httpretty.activate
def test_timeout_https():
    httpretty.register_uri(
        httpretty.POST, 'https://example.com/RPC2',
        content_type='text/xml',
        body=timeout
    )
    proxy = ServerProxy('https://example.com', timeout=0.5)
    with pytest.raises(socket.timeout):
        proxy.test()


if __name__ == "__main__":
    test_timeout()
    test_timeout_https()
[ [ [ 7, 13 ], [ 658, 664 ], [ 976, 982 ] ], [ [ 21, 25 ], [ 330, 334 ] ], [ [ 52, 63 ], [ 588, 599 ], [ 905, 916 ] ], [ [ 71, 80 ], [ 389, 398 ], [ 699, 708 ], [ 433, 442 ], [ 465, 474 ], [ 748, 757 ], [ 780, 789 ] ], [ [ 88, 94 ], [ 644, 650 ], [ 962, 968 ] ], [ [ 97, 109 ], [ 373, 385 ] ], [ [ 290, 297 ], [ 562, 569 ], [ 878, 885 ] ], [ [ 412, 424 ], [ 1047, 1059 ] ], [ [ 722, 740 ], [ 1066, 1084 ] ] ]
from django.db.models.signals import m2m_changed
from django.dispatch import receiver

from .models import Image


@receiver(m2m_changed, sender=Image.users_like.through)
def users_like_changed(sender, instance, **kwargs):
    instance.total_likes = instance.users_like.count()
    instance.save()
[ [ [ 37, 48 ], [ 129, 140 ] ], [ [ 78, 86 ], [ 120, 128 ] ], [ [ 108, 113 ], [ 149, 154 ] ], [ [ 180, 198 ] ] ]
{
  "targets": [
    {
      "target_name": "binding",
      "win_delay_load_hook": "true",
      "conditions": [
        ["target_arch == 'x64' or target_arch == 'ia32'", {
          "sources": [
            "src/binding.cpp",
            "src/BLAKE2/sse/blake2b.c",
            "src/BLAKE2/sse/blake2bp.c",
            "src/BLAKE2/sse/blake2s.c",
            "src/BLAKE2/sse/blake2sp.c"
          ],
          "include_dirs": [
            "<!(node -e \"require('nan')\")",
            "src/BLAKE2/sse"
          ]
        }],
        ["target_arch == 'arm64'", {
          "sources": [
            "src/binding.cpp",
            "src/BLAKE2/neon/blake2b-neon.c",
            "src/BLAKE2/neon/blake2bp.c",
            "src/BLAKE2/neon/blake2s-neon.c",
            "src/BLAKE2/neon/blake2sp.c"
          ],
          "include_dirs": [
            "<!(node -e \"require('nan')\")",
            "src/BLAKE2/neon"
          ]
        }],
        ["target_arch != 'x64' and target_arch != 'ia32' and target_arch != 'arm64'", {
          "sources": [
            "src/binding.cpp",
            "src/BLAKE2/ref/blake2b-ref.c",
            "src/BLAKE2/ref/blake2bp-ref.c",
            "src/BLAKE2/ref/blake2s-ref.c",
            "src/BLAKE2/ref/blake2sp-ref.c"
          ],
          "include_dirs": [
            "<!(node -e \"require('nan')\")",
            "src/BLAKE2/ref"
          ]
        }]
      ],
      "cflags_c": [
        "-std=c99",
        "-Wstrict-aliasing",
        "-Wextra",
        "-Wno-unused-function",
        "-Wno-unused-const-variable"
      ],
      "cflags_cc": [
        "-Wstrict-aliasing",
        "-Wextra",
        "-Wno-unused-function",
        "-Wno-unused-const-variable",
        "-Wno-unused-parameter"
      ],
      'xcode_settings': {
        'OTHER_CFLAGS': [
          "-Wstrict-aliasing",
          "-Wextra",
          "-Wno-unused-function",
          "-Wno-unused-const-variable",
          "-Wno-unused-parameter"
        ]
      },
      "msvs_settings": {
        "VCCLCompilerTool": {
          "AdditionalOptions": ["/arch:AVX"]
        }
      }
    }
  ]
}
[]
import numpy as np import pandas as pd import pytest from pandas.testing import assert_frame_equal @pytest.fixture def process_test_df(): "Base DataFrame" return pd.DataFrame( {"text": ["a_b_c", "c_d_e", np.nan, "f_g_h"], "numbers": range(1, 5)} ) @pytest.fixture def test_returns_dataframe(): "Base DataFrame" return pd.DataFrame( {"text": ["a1a2", "b1", "c1"], "numbers": [1, 2, 3]}, index=["A", "B", "C"], ) def test_column_name_type(process_test_df): """Raise TypeError if `column_name` type is not `str`.""" with pytest.raises(TypeError): process_test_df.process_text(["text"]) @pytest.mark.xfail(reason="new_column_names is deprecated.") def test_new_column_names_type(process_test_df): """Raise TypeError if `new_column_names` type is not string or list.""" with pytest.raises(TypeError): process_test_df.process_text( column_name="text", new_column_names={"nutext": "rar"} ) def test_column_name_presence(process_test_df): """Raise ValueError if `column_name` is not in dataframe.""" with pytest.raises(ValueError): process_test_df.process_text( column_name="Test", string_function="lower" ) @pytest.mark.xfail(reason="new_column_names is deprecated.") def test_new_column_names_presence_str(test_returns_dataframe): """ Raise ValueError if `new_column_names` is a str and is in the dataframe. """ with pytest.raises(ValueError): test_returns_dataframe.process_text( column_name="text", new_column_names="text", string_function="extractall", pat=r"([ab])?(\d)", ) @pytest.mark.xfail(reason="new_column_names is deprecated.") def test_new_column_names_presence_list(test_returns_dataframe): """ Raise ValueError if `new_column_names` is a list and at least one of the new names is in the dataframe. """ with pytest.raises(ValueError): test_returns_dataframe.process_text( column_name="text", new_column_names=["numbers", "newtext"], string_function="extractall", pat=r"([ab])?(\d)", ) @pytest.mark.xfail(reason="merge_frame is deprecated.") def test_merge_frame_type(test_returns_dataframe): """ Raise TypeError if `merge_frame` type is not bool.""" with pytest.raises(TypeError): test_returns_dataframe.process_text( column_name="text", new_column_names=["number", "newtext"], string_function="extractall", pat=r"([ab])?(\d)", merge_frame="True", ) @pytest.mark.xfail(reason="string_function must be present.") def test_string_function_is_None(process_test_df): """Test that dataframe is returned if string_function is None.""" result = process_test_df.process_text(column_name="text") assert_frame_equal(result, process_test_df) def test_str_split(process_test_df): """Test wrapper for Pandas `str.split()` method.""" expected = process_test_df.assign( text=process_test_df["text"].str.split("_") ) result = process_test_df.process_text( column_name="text", string_function="split", pat="_" ) assert_frame_equal(result, expected) @pytest.mark.xfail(reason="new_column_names is deprecated.") def test_new_column_names(process_test_df): """ Test that a new column name is created when `new_column_name` is not None. 
""" result = process_test_df.process_text( column_name="text", new_column_names="new_text", string_function="slice", start=2, ) expected = process_test_df.assign( new_text=process_test_df["text"].str.slice(start=2) ) assert_frame_equal(result, expected) @pytest.fixture def no_nulls_df(): return pd.DataFrame({"text": ["a", "b", "c", "d"], "numbers": range(1, 5)}) def test_str_cat(no_nulls_df): """Test outcome for Pandas `.str.cat()` method.""" result = no_nulls_df.process_text( column_name="text", string_function="cat", others=["A", "B", "C", "D"], ) expected = no_nulls_df.assign( text=no_nulls_df["text"].str.cat(others=["A", "B", "C", "D"]) ) assert_frame_equal(result, expected) def test_str_cat_result_is_a_string(no_nulls_df): """ Test wrapper for Pandas `.str.cat()` method when the outcome is a string. """ result = no_nulls_df.process_text( column_name="text", string_function="cat", ) expected = no_nulls_df.assign(text=no_nulls_df["text"].str.cat()) assert_frame_equal(result, expected) @pytest.mark.xfail(reason="new_column_names is deprecated.") def test_str_cat_result_is_a_string_and_new_column_names(no_nulls_df): """ Test wrapper for Pandas `.str.cat()` method when the outcome is a string, and `new_column_names` is not None. """ result = no_nulls_df.process_text( column_name="text", string_function="cat", new_column_names="combined" ) expected = no_nulls_df.assign(combined=no_nulls_df["text"].str.cat()) assert_frame_equal(result, expected) def test_str_get(): """Test outcome for Pandas `.str.get()` method.""" df = pd.DataFrame( {"text": ["aA", "bB", "cC", "dD"], "numbers": range(1, 5)} ) expected = df.assign(text=df["text"].str.get(1)) result = df.process_text(column_name="text", string_function="get", i=-1) assert_frame_equal(result, expected) def test_str_lower(): """Test string conversion to lowercase using `.str.lower()`.""" df = pd.DataFrame( { "codes": range(1, 7), "names": [ "Graham Chapman", "John Cleese", "Terry Gilliam", "Eric Idle", "Terry Jones", "Michael Palin", ], } ) expected = df.assign(names=df["names"].str.lower()) result = df.process_text(column_name="names", string_function="lower") assert_frame_equal(result, expected) def test_str_wrong(process_test_df): """Test that an invalid Pandas string method raises an exception.""" with pytest.raises(KeyError): process_test_df.process_text( column_name="text", string_function="invalid_function" ) def test_str_wrong_parameters(process_test_df): """Test that invalid argument for Pandas string method raises an error.""" with pytest.raises(TypeError): process_test_df.process_text( column_name="text", string_function="split", pattern="_" ) @pytest.fixture def returns_frame_1(): return pd.DataFrame( { "ticker": [ "spx 5/25/2001 p500", "spx 5/25/2001 p600", "spx 5/25/2001 p700", ] } ) @pytest.mark.xfail(reason="merge_frame is deprecated.") def test_return_dataframe_merge_is_None(returns_frame_1): """ Test that the dataframe returned when `merge_frame` is None is the result of the text processing, and is not merged to the original dataframe. 
""" expected_output = returns_frame_1["ticker"].str.split(" ", expand=True) result = returns_frame_1.process_text( column_name="ticker", string_function="split", expand=True, pat=" " ) assert_frame_equal(result, expected_output) @pytest.mark.xfail(reason="merge_frame is deprecated.") def test_return_dataframe_merge_is_not_None(returns_frame_1): """ Test that the dataframe returned when `merge_frame` is not None is a merger of the original dataframe, and the dataframe generated from the text processing. """ expected_output = pd.concat( [ returns_frame_1, returns_frame_1["ticker"] .str.split(" ", expand=True) .add_prefix("new_"), ], axis="columns", ) result = returns_frame_1.process_text( column_name="ticker", new_column_names="new_", merge_frame=True, string_function="split", expand=True, pat=" ", ) assert_frame_equal(result, expected_output) @pytest.mark.xfail(reason="merge_frame is deprecated.") def test_return_dataframe_merge_is_not_None_new_column_names_is_a_list( returns_frame_1, ): """ Test that the dataframe returned when `merge_frame` is not None is a merger of the original dataframe, and the dataframe generated from the text processing. Also, the `new_column_names` is a list. """ expected_output = pd.concat( [ returns_frame_1, returns_frame_1["ticker"] .str.split(" ", expand=True) .set_axis(["header1", "header2", "header3"], axis="columns"), ], axis="columns", ) result = returns_frame_1.process_text( column_name="ticker", new_column_names=["header1", "header2", "header3"], merge_frame=True, string_function="split", expand=True, pat=" ", ) assert_frame_equal(result, expected_output) @pytest.mark.xfail(reason="new_column_names is deprecated.") def test_return_dataframe_new_column_names_is_a_list_len_unequal( returns_frame_1, ): """ Raise error if text processing returns a dataframe, `new_column_names` is not None, and the length of `new_column_names` is not equal to the length of the new dataframe's columns. """ with pytest.raises(ValueError): returns_frame_1.process_text( column_name="ticker", new_column_names=["header1", "header2"], merge_frame=True, string_function="split", expand=True, pat=" ", ) def test_output_extractall(test_returns_dataframe): """ Raise ValueError if the output is a dataframe. """ with pytest.raises(ValueError): test_returns_dataframe.process_text( column_name="text", string_function="extractall", pat=r"(?P<letter>[ab])?(?P<digit>\d)", ) @pytest.mark.xfail(reason="merge_frame is deprecated.") def test_output_extractall_merge_frame_is_not_None(test_returns_dataframe): """ Test output when `string_function` is "extractall" and `merge_frame` is not None. """ expected_output = test_returns_dataframe["text"].str.extractall( r"(?P<letter>[ab])?(?P<digit>\d)" ) expected_output = test_returns_dataframe.join( expected_output.reset_index("match"), how="outer" ).set_index("match", append=True) result = test_returns_dataframe.process_text( column_name="text", merge_frame=True, string_function="extractall", pat=r"(?P<letter>[ab])?(?P<digit>\d)", ) assert_frame_equal(result, expected_output)
[ [ [ 7, 18 ], [ 222, 224 ] ], [ [ 26, 38 ], [ 172, 174 ], [ 350, 352 ], [ 3870, 3872 ], [ 5288, 5290 ], [ 5652, 5654 ], [ 6724, 6726 ], [ 7780, 7782 ], [ 8646, 8648 ] ], [ [ 46, 52 ], [ 102, 108 ], [ 273, 279 ], [ 656, 662 ], [ 1249, 1255 ], [ 1707, 1713 ], [ 2213, 2219 ], [ 2668, 2674 ], [ 3309, 3315 ], [ 3825, 3831 ], [ 4694, 4700 ], [ 6675, 6681 ], [ 6919, 6925 ], [ 7456, 7462 ], [ 8243, 8249 ], [ 9177, 9183 ], [ 10164, 10170 ], [ 580, 586 ], [ 850, 856 ], [ 1115, 1121 ], [ 1479, 1485 ], [ 1969, 1975 ], [ 2394, 2400 ], [ 6251, 6257 ], [ 6529, 6535 ], [ 9549, 9555 ], [ 9954, 9960 ] ], [ [ 80, 98 ], [ 2916, 2934 ], [ 3269, 3287 ], [ 3785, 3803 ], [ 4286, 4304 ], [ 4654, 4672 ], [ 5164, 5182 ], [ 5513, 5531 ], [ 6093, 6111 ], [ 7409, 7427 ], [ 8196, 8214 ], [ 9130, 9148 ], [ 10864, 10882 ] ], [ [ 121, 136 ] ], [ [ 292, 314 ] ], [ [ 469, 490 ] ], [ [ 720, 746 ] ], [ [ 997, 1022 ] ], [ [ 1313, 1347 ] ], [ [ 1771, 1806 ] ], [ [ 2272, 2293 ] ], [ [ 2733, 2761 ] ], [ [ 2966, 2980 ] ], [ [ 3373, 3394 ] ], [ [ 3844, 3855 ] ], [ [ 3945, 3957 ] ], [ [ 4329, 4360 ] ], [ [ 4758, 4810 ] ], [ [ 5207, 5219 ] ], [ [ 5556, 5570 ] ], [ [ 6136, 6150 ] ], [ [ 6397, 6422 ] ], [ [ 6694, 6709 ] ], [ [ 6978, 7013 ] ], [ [ 7515, 7554 ] ], [ [ 8302, 8368 ] ], [ [ 9241, 9301 ] ], [ [ 9830, 9852 ] ], [ [ 10223, 10269 ] ] ]
from __future__ import division from sympy import (Abs, Catalan, cos, Derivative, E, EulerGamma, exp, factorial, factorial2, Function, GoldenRatio, I, Integer, Integral, Interval, Lambda, Limit, log, Matrix, nan, O, oo, pi, Rational, Float, Rel, S, sin, SparseMatrix, sqrt, summation, Sum, Symbol, symbols, Wild, WildFunction, zeta, zoo, Dummy, Dict, Tuple, FiniteSet) from sympy.core import Expr from sympy.physics.units import second, joule from sympy.polys import Poly, RootOf, RootSum, groebner from sympy.statistics.distributions import Normal, Sample, Uniform from sympy.geometry import Point, Circle from sympy.utilities.pytest import raises from sympy.printing import sstr, sstrrepr, StrPrinter from sympy.core.trace import Tr x, y, z, w = symbols('x,y,z,w') d = Dummy('d') def test_printmethod(): class R(Abs): def _sympystr(self, printer): return "foo(%s)" % printer._print(self.args[0]) assert sstr(R(x)) == "foo(x)" class R(Abs): def _sympystr(self, printer): return "foo" assert sstr(R(x)) == "foo" def test_Abs(): assert str(Abs(x)) == "Abs(x)" assert str(Abs(Rational(1,6))) == "1/6" assert str(Abs(Rational(-1,6))) == "1/6" def test_Add(): assert str(x+y) == "x + y" assert str(x+1) == "x + 1" assert str(x+x**2) == "x**2 + x" assert str(5+x+y+x*y+x**2+y**2) == "x**2 + x*y + x + y**2 + y + 5" assert str(1+x+x**2/2+x**3/3) == "x**3/3 + x**2/2 + x + 1" assert str(2*x-7*x**2 + 2 + 3*y) == "-7*x**2 + 2*x + 3*y + 2" assert str(x-y) == "x - y" assert str(2-x) == "-x + 2" assert str(x-2) == "x - 2" assert str(x-y-z-w) == "-w + x - y - z" assert str(x-z*y**2*z*w) == "-w*y**2*z**2 + x" assert str(x-1*y*x*y) == "-x*y**2 + x" assert str(sin(x).series(x, 0, 15)) == "x - x**3/6 + x**5/120 - x**7/5040 + x**9/362880 - x**11/39916800 + x**13/6227020800 + O(x**15)" def test_Catalan(): assert str(Catalan) == "Catalan" def test_ComplexInfinity(): assert str(zoo) == "zoo" def test_Derivative(): assert str(Derivative(x, y)) == "Derivative(x, y)" assert str(Derivative(x**2, x, evaluate=False)) == "Derivative(x**2, x)" assert str(Derivative(x**2/y, x, y, evaluate=False)) == "Derivative(x**2/y, x, y)" def test_dict(): assert str({1: 1+x}) == sstr({1: 1+x}) == "{1: x + 1}" assert str({1: x**2, 2: y*x}) in ("{1: x**2, 2: x*y}", "{2: x*y, 1: x**2}") assert sstr({1: x**2, 2: y*x}) == "{1: x**2, 2: x*y}" def test_Dict(): assert str(Dict({1: 1+x})) == sstr({1: 1+x}) == "{1: x + 1}" assert str(Dict({1: x**2, 2: y*x})) in ( "{1: x**2, 2: x*y}", "{2: x*y, 1: x**2}") assert sstr(Dict({1: x**2, 2: y*x})) == "{1: x**2, 2: x*y}" def test_Dummy(): assert str(d) == "_d" assert str(d+x) == "_d + x" def test_EulerGamma(): assert str(EulerGamma) == "EulerGamma" def test_Exp(): assert str(E) == "E" def test_factorial(): n = Symbol('n', integer=True) assert str(factorial(-2)) == "0" assert str(factorial(0)) == "1" assert str(factorial(7)) == "5040" assert str(factorial(n)) == "n!" assert str(factorial(2*n)) == "(2*n)!" assert str(factorial(factorial(n))) == '(n!)!' assert str(factorial(factorial2(n))) == '(n!!)!' assert str(factorial2(factorial(n))) == '(n!)!!' assert str(factorial2(factorial2(n))) == '(n!!)!!' 
def test_Function(): f = Function('f') fx = f(x) w = WildFunction('w') assert str(f) == "f" assert str(fx) == "f(x)" assert str(w) == "w_" def test_Geometry(): assert sstr(Point(0,0)) == 'Point(0, 0)' assert sstr(Circle(Point(0,0), 3)) == 'Circle(Point(0, 0), 3)' # TODO test other Geometry entities def test_GoldenRatio(): assert str(GoldenRatio) == "GoldenRatio" def test_ImaginaryUnit(): assert str(I) == "I" def test_Infinity(): assert str(oo) == "oo" assert str(oo*I) == "oo*I" def test_Integer(): assert str(Integer(-1)) == "-1" assert str(Integer(1)) == "1" assert str(Integer(-3)) == "-3" assert str(Integer(0)) == "0" assert str(Integer(25)) == "25" def test_Integral(): assert str(Integral(sin(x), y)) == "Integral(sin(x), y)" assert str(Integral(sin(x), (y, 0, 1))) == "Integral(sin(x), (y, 0, 1))" def test_Interval(): a = Symbol('a', real=True) assert str(Interval(0, a)) == "[0, a]" assert str(Interval(0, a, False, False)) == "[0, a]" assert str(Interval(0, a, True, False)) == "(0, a]" assert str(Interval(0, a, False, True)) == "[0, a)" assert str(Interval(0, a, True, True)) == "(0, a)" def test_Lambda(): assert str(Lambda(d, d**2)) == "Lambda(_d, _d**2)" def test_Limit(): assert str(Limit(sin(x)/x, x, y)) == "Limit(sin(x)/x, x, y)" assert str(Limit(1/x, x, 0)) == "Limit(1/x, x, 0)" assert str(Limit(sin(x)/x, x, y, dir="-")) == "Limit(sin(x)/x, x, y, dir='-')" def test_list(): assert str([x]) == sstr([x]) == "[x]" assert str([x**2, x*y+1]) == sstr([x**2, x*y+1]) == "[x**2, x*y + 1]" assert str([x**2, [y+x]]) == sstr([x**2, [y+x]]) == "[x**2, [x + y]]" def test_Matrix(): M = Matrix([[x**+1, 1], [y, x+y]]) assert str(M) == sstr(M) == "[x, 1]\n[y, x + y]" M = Matrix() assert str(M) == sstr(M) == "[]" M = Matrix(0, 1, lambda i, j: 0) assert str(M) == sstr(M) == "[]" def test_Mul(): assert str(x/y) == "x/y" assert str(y/x) == "y/x" assert str(x/y/z) == "x/(y*z)" assert str((x+1)/(y+2)) == "(x + 1)/(y + 2)" assert str(2*x/3) == '2*x/3' assert str(-2*x/3) == '-2*x/3' class CustomClass1(Expr): is_commutative = True class CustomClass2(Expr): is_commutative = True cc1 = CustomClass1() cc2 = CustomClass2() assert str(Rational(2)*cc1) == '2*CustomClass1()' assert str(cc1*Rational(2)) == '2*CustomClass1()' assert str(cc1*Float("1.5")) == '1.5*CustomClass1()' assert str(cc2*Rational(2)) == '2*CustomClass2()' assert str(cc2*Rational(2)*cc1) == '2*CustomClass1()*CustomClass2()' assert str(cc1*Rational(2)*cc2) == '2*CustomClass1()*CustomClass2()' def test_NaN(): assert str(nan) == "nan" def test_NegativeInfinity(): assert str(-oo) == "-oo" def test_Normal(): assert str(Normal(x+y, z)) == "Normal(x + y, z)" def test_Order(): assert str(O(x)) == "O(x)" assert str(O(x**2)) == "O(x**2)" assert str(O(x*y)) == "O(x*y, x, y)" def test_Permutation_Cycle(): from sympy.combinatorics import Permutation, Cycle # general principle: economically, canonically show all moved elements # and the size of the permutation. 
for p, s in [ (Cycle(), 'Cycle()'), (Cycle(2), 'Cycle(2)'), (Cycle(2, 1), 'Cycle(1, 2)'), (Cycle(1, 2)(5)(6, 7)(10), 'Cycle(1, 2)(6, 7)(10)'), (Cycle(3, 4)(1, 2)(3, 4), 'Cycle(1, 2)(4)'), ]: assert str(p) == s Permutation.print_cyclic = False for p, s in [ (Permutation([]), 'Permutation([])'), (Permutation([], size=1), 'Permutation([0])'), (Permutation([], size=2), 'Permutation([0, 1])'), (Permutation([], size=10), 'Permutation([], size=10)'), (Permutation([1, 0, 2]), 'Permutation([1, 0, 2])'), (Permutation([1, 0, 2, 3, 4, 5]), 'Permutation([1, 0], size=6)'), (Permutation([1, 0, 2, 3, 4, 5], size=10), 'Permutation([1, 0], size=10)'), ]: assert str(p) == s Permutation.print_cyclic = True for p, s in [ (Permutation([]), 'Permutation()'), (Permutation([], size=1), 'Permutation(0)'), (Permutation([], size=2), 'Permutation(1)'), (Permutation([], size=10), 'Permutation(9)'), (Permutation([1, 0, 2]), 'Permutation(2)(0, 1)'), (Permutation([1, 0, 2, 3, 4, 5]), 'Permutation(5)(0, 1)'), (Permutation([1, 0, 2, 3, 4, 5], size=10), 'Permutation(9)(0, 1)'), (Permutation([0, 1, 3, 2, 4, 5], size=10), 'Permutation(9)(2, 3)'), ]: assert str(p) == s def test_Pi(): assert str(pi) == "pi" def test_Poly(): assert str(Poly(0, x)) == "Poly(0, x, domain='ZZ')" assert str(Poly(1, x)) == "Poly(1, x, domain='ZZ')" assert str(Poly(x, x)) == "Poly(x, x, domain='ZZ')" assert str(Poly(2*x + 1, x)) == "Poly(2*x + 1, x, domain='ZZ')" assert str(Poly(2*x - 1, x)) == "Poly(2*x - 1, x, domain='ZZ')" assert str(Poly(-1, x)) == "Poly(-1, x, domain='ZZ')" assert str(Poly(-x, x)) == "Poly(-x, x, domain='ZZ')" assert str(Poly(-2*x + 1, x)) == "Poly(-2*x + 1, x, domain='ZZ')" assert str(Poly(-2*x - 1, x)) == "Poly(-2*x - 1, x, domain='ZZ')" assert str(Poly(x - 1, x)) == "Poly(x - 1, x, domain='ZZ')" assert str(Poly(x**2 + 1 + y, x)) == "Poly(x**2 + y + 1, x, domain='ZZ[y]')" assert str(Poly(x**2 - 1 + y, x)) == "Poly(x**2 + y - 1, x, domain='ZZ[y]')" assert str(Poly(x**2 + I*x, x)) == "Poly(x**2 + I*x, x, domain='EX')" assert str(Poly(x**2 - I*x, x)) == "Poly(x**2 - I*x, x, domain='EX')" assert str(Poly(-x*y*z + x*y - 1, x, y, z)) == "Poly(-x*y*z + x*y - 1, x, y, z, domain='ZZ')" assert str(Poly(-w*x**21*y**7*z + (1 + w)*z**3 - 2*x*z + 1, x, y, z)) == \ "Poly(-w*x**21*y**7*z - 2*x*z + (w + 1)*z**3 + 1, x, y, z, domain='ZZ[w]')" assert str(Poly(x**2 + 1, x, modulus=2)) == "Poly(x**2 + 1, x, modulus=2)" assert str(Poly(2*x**2 + 3*x + 4, x, modulus=17)) == "Poly(2*x**2 + 3*x + 4, x, modulus=17)" def test_Pow(): assert str(x**-1) == "1/x" assert str(x**-2) == "x**(-2)" assert str(x**2) == "x**2" assert str((x+y)**-1) == "1/(x + y)" assert str((x+y)**-2) == "(x + y)**(-2)" assert str((x+y)**2) == "(x + y)**2" assert str((x+y)**(1+x)) == "(x + y)**(x + 1)" assert str(x**Rational(1, 3)) == "x**(1/3)" assert str(1/x**Rational(1, 3)) == "x**(-1/3)" assert str(sqrt(sqrt(x))) == "x**(1/4)" assert str(x**-1.0) == '1/x' def test_sqrt(): assert str(sqrt(x)) == "sqrt(x)" assert str(sqrt(x**2)) == "sqrt(x**2)" assert str(1/sqrt(x)) == "1/sqrt(x)" assert str(1/sqrt(x**2)) == "1/sqrt(x**2)" assert str(y/sqrt(x)) == "y/sqrt(x)" assert str(x**(1/2)) == "x**0.5" assert str(1/x**(1/2)) == "x**(-0.5)" def test_Rational(): n1 = Rational(1,4) n2 = Rational(1,3) n3 = Rational(2,4) n4 = Rational(2,-4) n5 = Rational(0) n6 = Rational(1) n7 = Rational(3) n8 = Rational(-3) assert str(n1*n2) == "1/12" assert str(n1*n2) == "1/12" assert str(n3) == "1/2" assert str(n1*n3) == "1/8" assert str(n1+n3) == "3/4" assert str(n1+n2) == "7/12" assert str(n1+n4) == "-1/4" 
assert str(n4*n4) == "1/4" assert str(n4+n2) == "-1/6" assert str(n4+n5) == "-1/2" assert str(n4*n5) == "0" assert str(n3+n4) == "0" assert str(n1**n7) == "1/64" assert str(n2**n7) == "1/27" assert str(n2**n8) == "27" assert str(n7**n8) == "1/27" assert str(Rational("-25")) == "-25" assert str(Rational("1.25")) == "5/4" assert str(Rational("-2.6e-2")) == "-13/500" assert str(S("25/7")) == "25/7" assert str(S("-123/569")) == "-123/569" assert str(S("0.1[23]", rational=1)) == "61/495" assert str(S("5.1[666]", rational=1)) == "31/6" assert str(S("-5.1[666]", rational=1)) == "-31/6" assert str(S("0.[9]", rational=1)) == "1" assert str(S("-0.[9]", rational=1)) == "-1" assert str(sqrt(Rational(1,4))) == "1/2" assert str(sqrt(Rational(1,36))) == "1/6" assert str((123**25) ** Rational(1,25)) == "123" assert str((123**25+1)**Rational(1,25)) != "123" assert str((123**25-1)**Rational(1,25)) != "123" assert str((123**25-1)**Rational(1,25)) != "122" assert str(sqrt(Rational(81,36))**3) == "27/8" assert str(1/sqrt(Rational(81,36))**3) == "8/27" assert str(sqrt(-4)) == str(2*I) assert str(2**Rational(1,10**10)) == "2**(1/10000000000)" def test_Float(): # NOTE prec is the whole number of decimal digits assert str(Float('1.23', prec=1+2)) == '1.23' assert str(Float('1.23456789', prec=1+8)) == '1.23456789' assert str(Float('1.234567890123456789', prec=1+18)) == '1.234567890123456789' assert str(pi.evalf(1+2)) == '3.14' assert str(pi.evalf(1+14)) == '3.14159265358979' assert str(pi.evalf(1+64)) == '3.1415926535897932384626433832795028841971693993751058209749445923' assert str(pi.round(-1)) == '0.' assert str((pi**400 - (pi**400).round(1)).n(2)) == '-0.e+88' def test_Relational(): assert str(Rel(x, y, "<")) == "x < y" assert str(Rel(x+y, y, "==")) == "x + y == y" def test_RootOf(): assert str(RootOf(x**5 + 2*x - 1, 0)) == "RootOf(x**5 + 2*x - 1, 0)" def test_RootSum(): f = x**5 + 2*x - 1 assert str(RootSum(f, Lambda(z, z), auto=False)) == "RootSum(x**5 + 2*x - 1)" assert str(RootSum(f, Lambda(z, z**2), auto=False)) == "RootSum(x**5 + 2*x - 1, Lambda(_z, _z**2))" def test_GroebnerBasis(): assert str(groebner([], x, y)) == "GroebnerBasis([], x, y, domain='ZZ', order='lex')" F = [x**2 - 3*y - x + 1, y**2 - 2*x + y - 1] assert str(groebner(F, order='grlex')) == \ "GroebnerBasis([x**2 - x - 3*y + 1, y**2 - 2*x + y - 1], x, y, domain='ZZ', order='grlex')" assert str(groebner(F, order='lex')) == \ "GroebnerBasis([2*x - y**2 - y + 1, y**4 + 2*y**3 - 3*y**2 - 16*y + 7], x, y, domain='ZZ', order='lex')" def test_Sample(): assert str(Sample([x, y, 1])) in [ "Sample([x, y, 1])", "Sample([y, 1, x])", "Sample([1, x, y])", "Sample([y, x, 1])", "Sample([x, 1, y])", "Sample([1, y, x])", ] def test_set(): assert sstr(set()) == 'set()' assert sstr(frozenset()) == 'frozenset()' assert sstr(set([1,2,3]))== 'set([1, 2, 3])' assert sstr(set([1,x,x**2,x**3,x**4])) == 'set([1, x, x**2, x**3, x**4])' def test_SparseMatrix(): M = SparseMatrix([[x**+1, 1], [y, x+y]]) assert str(M) == sstr(M) == "[x, 1]\n[y, x + y]" def test_Sum(): assert str(summation(cos(3*z), (z, x, y))) == "Sum(cos(3*z), (z, x, y))" assert str(Sum(x*y**2, (x, -2, 2), (y, -5, 5))) == \ "Sum(x*y**2, (x, -2, 2), (y, -5, 5))" def test_Symbol(): assert str(y) == "y" assert str(x) == "x" e = x assert str(e) == "x" def test_tuple(): assert str((x,)) == sstr((x,)) == "(x,)" assert str((x+y, 1+x)) == sstr((x+y, 1+x)) == "(x + y, x + 1)" assert str((x+y, (1+x, x**2))) == sstr((x+y, (1+x, x**2))) == "(x + y, (x + 1, x**2))" def test_Uniform(): assert str(Uniform(x, y)) == 
"Uniform(x, y)" assert str(Uniform(x+y, y)) == "Uniform(x + y, y)" def test_Unit(): assert str(second) == "s" assert str(joule) == "kg*m**2/s**2" # issue 2461 def test_wild_str(): # Check expressions containing Wild not causing infinite recursion w = Wild('x') assert str(w + 1) == 'x_ + 1' assert str(exp(2**w) + 5) == 'exp(2**x_) + 5' assert str(3*w + 1) == '3*x_ + 1' assert str(1/w + 1) == '1 + 1/x_' assert str(w**2 + 1) == 'x_**2 + 1' assert str(1/(1-w)) == '1/(-x_ + 1)' def test_zeta(): assert str(zeta(3)) == "zeta(3)" def test_bug2(): e = x-y a = str(e) b = str(e) assert a == b def test_bug4(): e = -2*sqrt(x)-y/sqrt(x)/2 assert str(e) not in ["(-2)*x**1/2(-1/2)*x**(-1/2)*y", "-2*x**1/2(-1/2)*x**(-1/2)*y","-2*x**1/2-1/2*x**-1/2*w"] assert str(e) == "-2*sqrt(x) - y/(2*sqrt(x))" def test_issue922(): e = Integral(x,x) + 1 assert str(e) == 'Integral(x, x) + 1' def test_sstrrepr(): assert sstr('abc') == 'abc' assert sstrrepr('abc') == "'abc'" e = ['a', 'b', 'c', x] assert sstr(e) == "[a, b, c, x]" assert sstrrepr(e) == "['a', 'b', 'c', x]" def test_infinity(): assert sstr(oo*I) == "oo*I" def test_full_prec(): assert sstr(S("0.3"), full_prec=True) == "0.300000000000000" assert sstr(S("0.3"), full_prec="auto") == "0.300000000000000" assert sstr(S("0.3"), full_prec=False) == "0.3" assert sstr(S("0.3")*x, full_prec=True) in [ "0.300000000000000*x", "x*0.300000000000000" ] assert sstr(S("0.3")*x, full_prec="auto") in [ "0.3*x", "x*0.3" ] assert sstr(S("0.3")*x, full_prec=False) in [ "0.3*x", "x*0.3" ] def test_noncommutative(): A, B, C = symbols('A,B,C', commutative=False) assert sstr(A*B*C**-1) == "A*B*C**(-1)" assert sstr(C**-1*A*B) == "C**(-1)*A*B" assert sstr(A*C**-1*B) == "A*C**(-1)*B" assert sstr(sqrt(A)) == "sqrt(A)" assert sstr(1/sqrt(A)) == "A**(-1/2)" def test_empty_printer(): str_printer = StrPrinter() assert str_printer.emptyPrinter("foo") == "foo" assert str_printer.emptyPrinter(x*y) == "x*y" assert str_printer.emptyPrinter(32) == "32" def test_settings(): raises(TypeError, lambda: sstr(S(4), method="garbage")) def test_RandomDomain(): from sympy.stats import Normal, Die, Exponential, pspace, where X = Normal('x1', 0, 1) assert str(where(X>0)) == "Domain: 0 < x1" D = Die('d1', 6) assert str(where(D>4)) == "Domain: Or(d1 == 5, d1 == 6)" A = Exponential('a', 1) B = Exponential('b', 1) assert str(pspace(Tuple(A,B)).domain) =="Domain: And(0 <= a, 0 <= b)" def test_FiniteSet(): assert str(FiniteSet(range(1, 51))) == '{1, 2, 3, ..., 48, 49, 50}' assert str(FiniteSet(range(1, 6))) == '{1, 2, 3, 4, 5}' def test_PrettyPoly(): from sympy.polys.domains import QQ F = QQ.frac_field(x, y) R = QQ[x, y] assert sstr(F.convert(x/(x + y))) == sstr(x/(x + y)) assert sstr(R.convert(x + y)) == sstr(x + y) def test_categories(): from sympy.categories import (Object, Morphism, NamedMorphism, IdentityMorphism, Category) A = Object("A") B = Object("B") f = NamedMorphism(A, B, "f") id_A = IdentityMorphism(A) K = Category("K") assert str(A) == 'Object("A")' assert str(f) == 'NamedMorphism(Object("A"), Object("B"), "f")' assert str(id_A) == 'IdentityMorphism(Object("A"))' assert str(K) == 'Category("K")' def test_Tr(): A, B = symbols('A B', commutative=False) t = Tr(A*B) assert str(t) == 'Tr(A*B)'
[ [ [ 23, 31 ] ], [ [ 52, 55 ], [ 839, 842 ], [ 989, 992 ], [ 1121, 1124 ], [ 1156, 1159 ], [ 1200, 1203 ] ], [ [ 57, 64 ], [ 1954, 1961 ] ], [ [ 66, 69 ], [ 14131, 14134 ] ], [ [ 71, 81 ], [ 2073, 2083 ], [ 2128, 2138 ], [ 2205, 2215 ] ], [ [ 83, 84 ], [ 2914, 2915 ] ], [ [ 86, 96 ], [ 2854, 2864 ] ], [ [ 98, 101 ], [ 15009, 15012 ] ], [ [ 107, 116 ], [ 2996, 3005 ], [ 3033, 3042 ], [ 3069, 3078 ], [ 3108, 3117 ], [ 3145, 3154 ], [ 3188, 3197 ], [ 3198, 3207 ], [ 3239, 3248 ], [ 3303, 3312 ] ], [ [ 118, 128 ], [ 3249, 3259 ], [ 3292, 3302 ], [ 3345, 3355 ], [ 3356, 3366 ] ], [ [ 130, 138 ], [ 3415, 3423 ] ], [ [ 140, 151 ], [ 3766, 3777 ] ], [ [ 153, 154 ], [ 3838, 3839 ], [ 3915, 3916 ], [ 8955, 8956 ], [ 9029, 9030 ], [ 11900, 11901 ], [ 15944, 15945 ] ], [ [ 156, 163 ], [ 3964, 3971 ], [ 4000, 4007 ], [ 4034, 4041 ], [ 4070, 4077 ], [ 4104, 4111 ] ], [ [ 165, 173 ], [ 4162, 4170 ], [ 4223, 4231 ], [ 15625, 15633 ] ], [ [ 179, 187 ], [ 4353, 4361 ], [ 4396, 4404 ], [ 4453, 4461 ], [ 4509, 4517 ], [ 4565, 4573 ] ], [ [ 189, 195 ], [ 4640, 4646 ], [ 12822, 12828 ], [ 12904, 12910 ] ], [ [ 197, 202 ], [ 4714, 4719 ], [ 4779, 4784 ], [ 4834, 4839 ] ], [ [ 204, 207 ] ], [ [ 209, 215 ], [ 5138, 5144 ], [ 5234, 5240 ], [ 5288, 5294 ] ], [ [ 217, 220 ], [ 6152, 6155 ] ], [ [ 222, 223 ], [ 6332, 6333 ], [ 6363, 6364 ], [ 6400, 6401 ] ], [ [ 225, 227 ], [ 3885, 3887 ], [ 3912, 3914 ], [ 6212, 6214 ], [ 15941, 15943 ] ], [ [ 229, 231 ], [ 8106, 8108 ], [ 12255, 12257 ], [ 12297, 12299 ], [ 12351, 12353 ], [ 12455, 12457 ], [ 12493, 12495 ], [ 12504, 12506 ] ], [ [ 233, 241 ], [ 1160, 1168 ], [ 1204, 1212 ], [ 5770, 5778 ], [ 5828, 5836 ], [ 5939, 5947 ], [ 5993, 6001 ], [ 6066, 6074 ], [ 9825, 9833 ], [ 9875, 9883 ], [ 10320, 10328 ], [ 10343, 10351 ], [ 10366, 10374 ], [ 10389, 10397 ], [ 10413, 10421 ], [ 10434, 10442 ], [ 10455, 10463 ], [ 10476, 10484 ], [ 11005, 11013 ], [ 11046, 11054 ], [ 11088, 11096 ], [ 11476, 11484 ], [ 11521, 11529 ], [ 11576, 11584 ], [ 11629, 11637 ], [ 11682, 11690 ], [ 11735, 11743 ], [ 11781, 11789 ], [ 11834, 11842 ], [ 11921, 11929 ] ], [ [ 243, 248 ], [ 5882, 5887 ], [ 12053, 12058 ], [ 12106, 12111 ], [ 12169, 12174 ] ], [ [ 250, 253 ], [ 12581, 12584 ], [ 12623, 12626 ] ], [ [ 259, 260 ], [ 11137, 11138 ], [ 11173, 11174 ], [ 11217, 11218 ], [ 11270, 11271 ], [ 11322, 11323 ], [ 11376, 11377 ], [ 11422, 11423 ], [ 15996, 15997 ], [ 16061, 16062 ], [ 16128, 16129 ], [ 16180, 16181 ], [ 16312, 16313 ], [ 16418, 16419 ], [ 17063, 17064 ] ], [ [ 262, 265 ], [ 1793, 1796 ], [ 4171, 4174 ], [ 4232, 4235 ], [ 4720, 4723 ], [ 4840, 4843 ] ], [ [ 267, 279 ], [ 13995, 14007 ] ], [ [ 281, 285 ], [ 9921, 9925 ], [ 9926, 9930 ], [ 10016, 10020 ], [ 10053, 10057 ], [ 10098, 10102 ], [ 10139, 10143 ], [ 10186, 10190 ], [ 11471, 11475 ], [ 11516, 11520 ], [ 11776, 11780 ], [ 11829, 11833 ], [ 11881, 11885 ], [ 15397, 15401 ], [ 15407, 15411 ], [ 16734, 16738 ], [ 16774, 16778 ] ], [ [ 287, 296 ], [ 14121, 14130 ] ], [ [ 298, 301 ], [ 14198, 14201 ] ], [ [ 303, 309 ], [ 2955, 2961 ], [ 4315, 4321 ] ], [ [ 311, 318 ], [ 768, 775 ], [ 16549, 16556 ], [ 18346, 18353 ] ], [ [ 320, 324 ], [ 14940, 14944 ] ], [ [ 330, 342 ], [ 3451, 3463 ] ], [ [ 344, 348 ], [ 15267, 15271 ] ], [ [ 350, 353 ], [ 2020, 2023 ] ], [ [ 355, 360 ], [ 791, 796 ] ], [ [ 362, 366 ], [ 2525, 2529 ], [ 2590, 2594 ], [ 2690, 2694 ] ], [ [ 368, 373 ], [ 17418, 17423 ] ], [ [ 375, 384 ], [ 17508, 17517 ], [ 17580, 17589 ] ], [ [ 409, 413 ], [ 5608, 5612 ], [ 5668, 5672 ] ], [ [ 446, 452 ], [ 14771, 14777 ] ], [ 
[ 454, 459 ], [ 14801, 14806 ] ], [ [ 484, 488 ], [ 8151, 8155 ], [ 8207, 8211 ], [ 8263, 8267 ], [ 8320, 8324 ], [ 8388, 8392 ], [ 8457, 8461 ], [ 8515, 8519 ], [ 8574, 8578 ], [ 8644, 8648 ], [ 8715, 8719 ], [ 8780, 8784 ], [ 8861, 8865 ], [ 8943, 8947 ], [ 9017, 9021 ], [ 9092, 9096 ], [ 9190, 9194 ], [ 9354, 9358 ], [ 9433, 9437 ] ], [ [ 490, 496 ], [ 12693, 12699 ] ], [ [ 498, 505 ], [ 12811, 12818 ], [ 12893, 12900 ] ], [ [ 507, 515 ], [ 13024, 13032 ], [ 13165, 13173 ], [ 13313, 13321 ] ], [ [ 559, 565 ], [ 6260, 6266 ] ], [ [ 567, 573 ], [ 13492, 13498 ] ], [ [ 575, 582 ], [ 14649, 14656 ], [ 14698, 14705 ] ], [ [ 610, 615 ], [ 3587, 3592 ], [ 3640, 3645 ] ], [ [ 617, 623 ], [ 3633, 3639 ] ], [ [ 660, 666 ], [ 17032, 17038 ] ], [ [ 695, 699 ], [ 954, 958 ], [ 1069, 1073 ], [ 2323, 2327 ], [ 2445, 2449 ], [ 2544, 2548 ], [ 2685, 2689 ], [ 3582, 3586 ], [ 3628, 3632 ], [ 4943, 4947 ], [ 4995, 4999 ], [ 5069, 5073 ], [ 5190, 5194 ], [ 5264, 5268 ], [ 5338, 5342 ], [ 13756, 13760 ], [ 13796, 13800 ], [ 13843, 13847 ], [ 13892, 13896 ], [ 14053, 14057 ], [ 14434, 14438 ], [ 14485, 14489 ], [ 14560, 14564 ], [ 15720, 15724 ], [ 15824, 15828 ], [ 15936, 15940 ], [ 15991, 15995 ], [ 16056, 16060 ], [ 16123, 16127 ], [ 16175, 16179 ], [ 16307, 16311 ], [ 16413, 16417 ], [ 16597, 16601 ], [ 16641, 16645 ], [ 16685, 16689 ], [ 16729, 16733 ], [ 16767, 16771 ], [ 17744, 17748 ], [ 17774, 17778 ], [ 17801, 17805 ], [ 17827, 17831 ], [ 17058, 17062 ] ], [ [ 701, 709 ], [ 15757, 15765 ], [ 15866, 15874 ] ], [ [ 711, 721 ], [ 16843, 16853 ] ], [ [ 751, 753 ], [ 18388, 18390 ] ], [ [ 755, 756 ], [ 961, 962 ], [ 1076, 1077 ], [ 1125, 1126 ], [ 1262, 1263 ], [ 1293, 1294 ], [ 1324, 1325 ], [ 1326, 1327 ], [ 1363, 1364 ], [ 1367, 1368 ], [ 1371, 1372 ], [ 1434, 1435 ], [ 1436, 1437 ], [ 1443, 1444 ], [ 1497, 1498 ], [ 1501, 1502 ], [ 1561, 1562 ], [ 1594, 1595 ], [ 1624, 1625 ], [ 1655, 1656 ], [ 1699, 1700 ], [ 1750, 1751 ], [ 1756, 1757 ], [ 1797, 1798 ], [ 1807, 1808 ], [ 2084, 2085 ], [ 2139, 2140 ], [ 2145, 2146 ], [ 2216, 2217 ], [ 2224, 2225 ], [ 2316, 2317 ], [ 2334, 2335 ], [ 2373, 2374 ], [ 2384, 2385 ], [ 2454, 2455 ], [ 2465, 2466 ], [ 2536, 2537 ], [ 2555, 2556 ], [ 2599, 2600 ], [ 2610, 2611 ], [ 2699, 2700 ], [ 2710, 2711 ], [ 2800, 2801 ], [ 3440, 3441 ], [ 4175, 4176 ], [ 4236, 4237 ], [ 4724, 4725 ], [ 4727, 4728 ], [ 4730, 4731 ], [ 4787, 4788 ], [ 4790, 4791 ], [ 4844, 4845 ], [ 4847, 4848 ], [ 4850, 4851 ], [ 4936, 4937 ], [ 4949, 4950 ], [ 4978, 4979 ], [ 4984, 4985 ], [ 5001, 5002 ], [ 5007, 5008 ], [ 5052, 5053 ], [ 5061, 5062 ], [ 5075, 5076 ], [ 5084, 5085 ], [ 5147, 5148 ], [ 5162, 5163 ], [ 5386, 5387 ], [ 5417, 5418 ], [ 5444, 5445 ], [ 5480, 5481 ], [ 5530, 5531 ], [ 5566, 5567 ], [ 6267, 6268 ], [ 6334, 6335 ], [ 6365, 6366 ], [ 6402, 6403 ], [ 8159, 8160 ], [ 8215, 8216 ], [ 8268, 8269 ], [ 8271, 8272 ], [ 8327, 8328 ], [ 8334, 8335 ], [ 8395, 8396 ], [ 8402, 8403 ], [ 8466, 8467 ], [ 8521, 8522 ], [ 8524, 8525 ], [ 8582, 8583 ], [ 8589, 8590 ], [ 8652, 8653 ], [ 8659, 8660 ], [ 8720, 8721 ], [ 8727, 8728 ], [ 8785, 8786 ], [ 8799, 8800 ], [ 8866, 8867 ], [ 8880, 8881 ], [ 8948, 8949 ], [ 8957, 8958 ], [ 8960, 8961 ], [ 9022, 9023 ], [ 9031, 9032 ], [ 9034, 9035 ], [ 9098, 9099 ], [ 9106, 9107 ], [ 9115, 9116 ], [ 9198, 9199 ], [ 9230, 9231 ], [ 9239, 9240 ], [ 9359, 9360 ], [ 9369, 9370 ], [ 9440, 9441 ], [ 9449, 9450 ], [ 9456, 9457 ], [ 9547, 9548 ], [ 9578, 9579 ], [ 9613, 9614 ], [ 9645, 9646 ], [ 9686, 9687 ], [ 9731, 9732 ], [ 9772, 9773 ], [ 9781, 9782 ], [ 
9822, 9823 ], [ 9872, 9873 ], [ 9931, 9932 ], [ 9965, 9966 ], [ 10021, 10022 ], [ 10058, 10059 ], [ 10103, 10104 ], [ 10144, 10145 ], [ 10191, 10192 ], [ 10225, 10226 ], [ 10264, 10265 ], [ 12585, 12586 ], [ 12627, 12628 ], [ 12700, 12701 ], [ 12709, 12710 ], [ 12780, 12781 ], [ 12789, 12790 ], [ 13037, 13038 ], [ 13109, 13110 ], [ 13122, 13123 ], [ 13138, 13139 ], [ 13500, 13501 ], [ 13904, 13905 ], [ 13906, 13907 ], [ 13911, 13912 ], [ 13916, 13917 ], [ 14010, 14011 ], [ 14025, 14026 ], [ 14145, 14146 ], [ 14202, 14203 ], [ 14211, 14212 ], [ 14346, 14347 ], [ 14364, 14365 ], [ 14426, 14427 ], [ 14440, 14441 ], [ 14471, 14472 ], [ 14478, 14479 ], [ 14491, 14492 ], [ 14498, 14499 ], [ 14538, 14539 ], [ 14546, 14547 ], [ 14549, 14550 ], [ 14566, 14567 ], [ 14574, 14575 ], [ 14577, 14578 ], [ 14657, 14658 ], [ 14706, 14707 ], [ 15315, 15316 ], [ 15402, 15403 ], [ 15412, 15413 ], [ 15634, 15635 ], [ 15636, 15637 ], [ 15810, 15811 ], [ 16189, 16190 ], [ 16321, 16322 ], [ 16427, 16428 ], [ 16944, 16945 ], [ 17710, 17711 ], [ 17727, 17728 ], [ 17759, 17760 ], [ 17762, 17763 ], [ 17779, 17780 ], [ 17782, 17783 ], [ 17816, 17817 ], [ 17832, 17833 ] ], [ [ 758, 759 ], [ 1264, 1265 ], [ 1365, 1366 ], [ 1369, 1370 ], [ 1376, 1377 ], [ 1514, 1515 ], [ 1563, 1564 ], [ 1657, 1658 ], [ 1703, 1704 ], [ 1754, 1755 ], [ 1758, 1759 ], [ 2087, 2088 ], [ 2221, 2222 ], [ 2227, 2228 ], [ 2382, 2383 ], [ 2463, 2464 ], [ 2608, 2609 ], [ 2708, 2709 ], [ 4179, 4180 ], [ 4241, 4242 ], [ 4733, 4734 ], [ 4853, 4854 ], [ 4986, 4987 ], [ 5009, 5010 ], [ 5059, 5060 ], [ 5082, 5083 ], [ 5159, 5160 ], [ 5164, 5165 ], [ 5388, 5389 ], [ 5415, 5416 ], [ 5446, 5447 ], [ 5486, 5487 ], [ 6269, 6270 ], [ 6404, 6405 ], [ 8796, 8797 ], [ 8877, 8878 ], [ 9100, 9101 ], [ 9108, 9109 ], [ 9118, 9119 ], [ 9204, 9205 ], [ 9242, 9243 ], [ 9647, 9648 ], [ 9688, 9689 ], [ 9733, 9734 ], [ 9774, 9775 ], [ 10184, 10185 ], [ 12588, 12589 ], [ 12629, 12630 ], [ 12632, 12633 ], [ 13040, 13041 ], [ 13118, 13119 ], [ 13129, 13130 ], [ 13142, 13143 ], [ 13503, 13504 ], [ 14022, 14023 ], [ 14027, 14028 ], [ 14148, 14149 ], [ 14204, 14205 ], [ 14223, 14224 ], [ 14321, 14322 ], [ 14473, 14474 ], [ 14493, 14494 ], [ 14540, 14541 ], [ 14568, 14569 ], [ 14660, 14661 ], [ 14708, 14709 ], [ 14711, 14712 ], [ 15317, 15318 ], [ 15405, 15406 ], [ 16946, 16947 ], [ 17713, 17714 ], [ 17730, 17731 ], [ 17766, 17767 ], [ 17786, 17787 ], [ 17820, 17821 ], [ 17836, 17837 ] ], [ [ 761, 762 ], [ 1659, 1660 ], [ 1701, 1702 ], [ 1708, 1709 ], [ 5448, 5449 ], [ 6272, 6273 ], [ 9102, 9103 ], [ 9121, 9122 ], [ 9209, 9210 ], [ 9221, 9222 ], [ 9232, 9233 ], [ 9245, 9246 ], [ 12829, 12830 ], [ 12832, 12833 ], [ 12911, 12912 ], [ 12914, 12915 ], [ 14137, 14138 ], [ 14142, 14143 ] ], [ [ 764, 765 ], [ 1661, 1662 ], [ 1710, 1711 ], [ 9196, 9197 ], [ 9218, 9219 ] ], [ [ 787, 788 ], [ 2772, 2773 ], [ 2798, 2799 ], [ 4647, 4648 ], [ 4650, 4651 ] ], [ [ 807, 823 ] ], [ [ 1094, 1102 ] ], [ [ 1235, 1243 ] ], [ [ 1923, 1935 ] ], [ [ 1981, 2001 ] ], [ [ 2039, 2054 ] ], [ [ 2282, 2291 ] ], [ [ 2497, 2506 ] ], [ [ 2743, 2753 ] ], [ [ 2820, 2835 ] ], [ [ 2887, 2895 ] ], [ [ 2929, 2943 ] ], [ [ 3390, 3403 ] ], [ [ 3554, 3567 ] ], [ [ 3731, 3747 ] ], [ [ 3801, 3819 ] ], [ [ 3853, 3866 ] ], [ [ 3933, 3945 ] ], [ [ 4130, 4143 ] ], [ [ 4290, 4303 ] ], [ [ 4610, 4621 ] ], [ [ 4685, 4695 ] ], [ [ 4907, 4916 ] ], [ [ 5115, 5126 ] ], [ [ 5359, 5367 ] ], [ [ 6125, 6133 ] ], [ [ 6171, 6192 ] ], [ [ 6230, 6241 ] ], [ [ 6303, 6313 ] ], [ [ 6431, 6453 ] ], [ [ 8080, 8087 ] ], [ [ 8123, 8132 ] ], [ [ 
9520, 9528 ] ], [ [ 9988, 9997 ] ], [ [ 10294, 10307 ] ], [ [ 11970, 11980 ] ], [ [ 12547, 12562 ] ], [ [ 12663, 12674 ] ], [ [ 12756, 12768 ] ], [ [ 12987, 13005 ] ], [ [ 13462, 13473 ] ], [ [ 13733, 13741 ] ], [ [ 13966, 13983 ] ], [ [ 14094, 14102 ] ], [ [ 14291, 14302 ] ], [ [ 14396, 14406 ] ], [ [ 14618, 14630 ] ], [ [ 14743, 14752 ] ], [ [ 14844, 14857 ] ], [ [ 15239, 15248 ] ], [ [ 15294, 15303 ] ], [ [ 15373, 15382 ] ], [ [ 15600, 15613 ] ], [ [ 15692, 15705 ] ], [ [ 15908, 15921 ] ], [ [ 15962, 15976 ] ], [ [ 16512, 16531 ] ], [ [ 16803, 16821 ] ], [ [ 17011, 17024 ] ], [ [ 17093, 17110 ] ], [ [ 17475, 17489 ] ], [ [ 17630, 17645 ] ], [ [ 17844, 17859 ] ], [ [ 18324, 18331 ] ] ]
# model settings _base_ = [ '../_base_/datasets/coco_instance.py', '../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py' ] model = dict( type='DSC', pretrained='torchvision://resnet50', backbone=dict( type='ResNet', depth=50, num_stages=4, out_indices=(0, 1, 2, 3), frozen_stages=1, norm_cfg=dict(type='BN', requires_grad=True), norm_eval=True, style='pytorch'), neck=dict( type='FPN', in_channels=[256, 512, 1024, 2048], out_channels=256, num_outs=5), rpn_head=dict( type='RPNHead', in_channels=256, feat_channels=256, anchor_generator=dict( type='AnchorGenerator', scales=[8], ratios=[0.5, 1.0, 2.0], strides=[4, 8, 16, 32, 64]), bbox_coder=dict( type='DeltaXYWHBBoxCoder', target_means=[.0, .0, .0, .0], target_stds=[1.0, 1.0, 1.0, 1.0]), loss_cls=dict( type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0), loss_bbox=dict(type='SmoothL1Loss', beta=1.0 / 9.0, loss_weight=1.0)), roi_head=dict( type='DSCRoIHead', num_stages=3, stage_loss_weights=[dict(loss_mpn=1, loss_bbox=1, loss_cls=1), dict(loss_mpn=1, loss_bbox=0.5, loss_cls=1), dict(loss_mpn=1, loss_bbox=0.5, loss_cls=1), dict(loss_mask=1)], semantic_roi_extractor=dict( type='SingleRoIExtractor', roi_layer=dict(type='RoIAlign', out_size=14, sample_num=0), out_channels=256, featmap_strides=[8]), semantic_head=dict( type='FusedSemanticHead', num_ins=5, fusion_level=1, num_convs=4, in_channels=256, conv_out_channels=256, num_classes=183, ignore_label=255, loss_weight=0.2), relative_roi_extractor=dict( type='RelativeRoIExtractor', roi_layer=dict(type='RoIAlign', out_size=7, sample_num=0), out_channels=256, featmap_strides=[1.0]), mpn_roi_extractor=dict( type='SingleRoIExtractor', roi_layer=dict(type='RoIAlign', out_size=7, sample_num=0), out_channels=256, featmap_strides=[4, 8, 16, 32]), mpn=[ dict( type='DSCMaskHead', with_conv_res=False, num_convs=4, in_channels=256, conv_out_channels=256, class_agnostic=True, loss_mask=dict( type='CrossEntropyLoss', use_mask=True, loss_weight=1.0)), dict( type='DSCMaskHead', with_conv_res=True, num_convs=4, in_channels=256, conv_out_channels=256, class_agnostic=True, loss_mask=dict( type='CrossEntropyLoss', use_mask=True, loss_weight=1.0)), dict( type='DSCMaskHead', with_conv_res=True, num_convs=4, in_channels=256, conv_out_channels=256, class_agnostic=True, loss_mask=dict( type='CrossEntropyLoss', use_mask=True, loss_weight=1.0))], bbox_roi_extractor=dict( type='SgSingleRoIExtractor', roi_layer=dict(type='RoIAlign', out_size=7, sample_num=0), out_channels=256, featmap_strides=[4, 8, 16, 32]), bbox_head=[ dict( type='Shared2FCDSCBBoxHead', conv_res=1, in_channels=256, fc_out_channels=1024, roi_feat_size=7, num_classes=80, bbox_coder=dict( type='DeltaXYWHBBoxCoder', target_means=[0., 0., 0., 0.], target_stds=[0.1, 0.1, 0.2, 0.2]), reg_class_agnostic=True, loss_cls=dict( type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0), loss_bbox=dict(type='SmoothL1Loss', beta=1.0, loss_weight=1.0)), dict( type='Shared2FCDSCBBoxHead', conv_res=1, in_channels=256, fc_out_channels=1024, roi_feat_size=7, num_classes=80, bbox_coder=dict( type='DeltaXYWHBBoxCoder', target_means=[0., 0., 0., 0.], target_stds=[0.05, 0.05, 0.1, 0.1]), reg_class_agnostic=True, loss_cls=dict( type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0), loss_bbox=dict(type='SmoothL1Loss', beta=1.0, loss_weight=1.0)), dict( type='Shared2FCDSCBBoxHead', conv_res=1, in_channels=256, fc_out_channels=1024, roi_feat_size=7, num_classes=80, bbox_coder=dict( type='DeltaXYWHBBoxCoder', target_means=[0., 0., 0., 0.], 
target_stds=[0.033, 0.033, 0.067, 0.067]), reg_class_agnostic=True, loss_cls=dict( type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0), loss_bbox=dict(type='SmoothL1Loss', beta=1.0, loss_weight=1.0)) ], mask_roi_extractor=dict( type='SingleRoIExtractor', roi_layer=dict(type='RoIAlign', out_size=14, sample_num=0), out_channels=256, featmap_strides=[4, 8, 16, 32]), mask_head=dict( type='DSCMaskHead', num_convs=4, in_channels=256, conv_out_channels=256, num_classes=80, loss_mask=dict( type='CrossEntropyLoss', use_mask=True, loss_weight=1.0)))) # model training and testing settings train_cfg = dict( rpn=dict( assigner=dict( type='MaxIoUAssigner', pos_iou_thr=0.7, neg_iou_thr=0.3, min_pos_iou=0.3, match_low_quality=True, ignore_iof_thr=-1), sampler=dict( type='RandomSampler', num=256, pos_fraction=0.5, neg_pos_ub=-1, add_gt_as_proposals=False), allowed_border=0, pos_weight=-1, debug=False), rpn_proposal=dict( nms_across_levels=False, nms_pre=2000, nms_post=2000, max_num=2000, nms_thr=0.7, min_bbox_size=0), rcnn=[ dict( assigner=dict( type='MaxIoUAssigner', pos_iou_thr=0.5, neg_iou_thr=0.5, min_pos_iou=0.5, match_low_quality=False, ignore_iof_thr=-1), sampler=dict( type='RandomSampler', num=512, pos_fraction=0.25, neg_pos_ub=-1, add_gt_as_proposals=True), mask_size=14, pos_weight=-1, debug=False), dict( assigner=dict( type='MaxIoUAssigner', pos_iou_thr=0.6, neg_iou_thr=0.6, min_pos_iou=0.6, match_low_quality=False, ignore_iof_thr=-1), sampler=dict( type='PseudoSampler', num=512, pos_fraction=0.25, neg_pos_ub=-1, add_gt_as_proposals=False), mask_size=14, pos_weight=-1, debug=False), dict( assigner=dict( type='MaxIoUAssigner', pos_iou_thr=0.7, neg_iou_thr=0.7, min_pos_iou=0.7, match_low_quality=False, ignore_iof_thr=-1), sampler=dict( type='PseudoSampler', num=512, pos_fraction=0.25, neg_pos_ub=-1, add_gt_as_proposals=False), mask_size=14, pos_weight=-1, debug=False), dict( assigner=dict( type='MaxIoUAssigner', pos_iou_thr=0.7, neg_iou_thr=0.7, min_pos_iou=0.7, match_low_quality=False, ignore_iof_thr=-1), sampler=dict( type='PseudoSampler', num=512, pos_fraction=0.25, neg_pos_ub=-1, add_gt_as_proposals=False), mask_size=28, pos_weight=-1, debug=False), ]) test_cfg = dict( rpn=dict( nms_across_levels=False, nms_pre=1000, nms_post=1000, max_num=1000, nms_thr=0.7, min_bbox_size=0), rcnn=dict( score_thr=0.001, nms=dict(type='nms', iou_thr=0.5), max_per_img=100, mask_thr_binary=0.5)) data_root = 'data/coco/' img_norm_cfg = dict( mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True) train_pipeline = [ dict(type='LoadImageFromFile'), dict( type='LoadAnnotations', with_bbox=True, with_mask=True, with_seg=True), dict(type='Resize', img_scale=(1333, 800), keep_ratio=True), dict(type='RandomFlip', flip_ratio=0.5), dict(type='Normalize', **img_norm_cfg), dict(type='Pad', size_divisor=32), dict(type='SegRescale', scale_factor=1 / 8), dict(type='DefaultFormatBundle'), dict( type='Collect', keys=['img', 'gt_bboxes', 'gt_labels', 'gt_masks', 'gt_semantic_seg']), ] test_pipeline = [ dict(type='LoadImageFromFile'), dict( type='MultiScaleFlipAug', img_scale=(1333, 800), flip=False, transforms=[ dict(type='Resize', keep_ratio=True), dict(type='RandomFlip', flip_ratio=0.5), dict(type='Normalize', **img_norm_cfg), dict(type='Pad', size_divisor=32), dict(type='ImageToTensor', keys=['img']), dict(type='Collect', keys=['img']), ]) ] data = dict( train=dict( seg_prefix=data_root + 'stuffthingmaps/train2017/', pipeline=train_pipeline), val=dict(pipeline=test_pipeline), 
test=dict(pipeline=test_pipeline))
[ [ [ 17, 23 ] ], [ [ 146, 151 ] ], [ [ 6393, 6402 ] ], [ [ 9206, 9214 ] ], [ [ 9523, 9532 ], [ 10723, 10732 ] ], [ [ 9548, 9560 ], [ 9931, 9943 ], [ 10498, 10510 ] ], [ [ 9647, 9661 ], [ 10781, 10795 ] ], [ [ 10188, 10201 ], [ 10820, 10833 ], [ 10859, 10872 ] ], [ [ 10675, 10679 ] ] ]
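The sample above is an MMDetection-style config (a DSC detector with an R-50 FPN backbone, COCO instance segmentation, separate top-level train_cfg/test_cfg). As a rough sketch of how such a file is consumed, the snippet below loads it with mmcv and builds the detector; the config path and the exact build call are assumptions and vary between MMDetection releases, and the custom DSC modules would have to be registered first.

# Rough sketch only: the config path and build API are assumptions and differ
# between MMDetection releases; the custom DSC heads must already be
# registered with MMDetection for build_detector to resolve them.
from mmcv import Config
from mmdet.models import build_detector

cfg = Config.fromfile('configs/dsc/dsc_r50_fpn_1x_coco.py')
# In releases where train_cfg/test_cfg sit at the top level of the config,
# as in the sample above, they are passed next to the model dict.
model = build_detector(cfg.model,
                       train_cfg=cfg.get('train_cfg'),
                       test_cfg=cfg.get('test_cfg'))
print(type(model).__name__)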
# -*- coding: utf-8 -*-
from __future__ import unicode_literals

from django.db import models, migrations


class Migration(migrations.Migration):

    dependencies = [
    ]

    operations = [
        migrations.CreateModel(
            name='Item',
            fields=[
                ('id', models.AutoField(primary_key=True, serialize=False, verbose_name='ID', auto_created=True)),
            ],
        ),
    ]
[ [ [ 47, 63 ] ], [ [ 87, 93 ], [ 296, 302 ] ], [ [ 95, 105 ], [ 124, 134 ], [ 203, 213 ] ], [ [ 114, 123 ] ] ]
"""This file is needed for editable installs (`pip install -e .`).

Can be removed once the following is resolved
https://github.com/pypa/packaging-problems/issues/256
"""
from setuptools import setup

setup()
[ [ [ 195, 200 ], [ 202, 207 ] ] ]
############### Injector Parameters ##################
#
# config - config file used by the compiler pass
# funcList - list of functions that are faulty
# prob - probability that instruction is faulty
# byte - which byte is faulty (0-7) -1 random
# singleInj - one injection per active rank (0 or 1)
# ptr - add code to inject into pointers (0 or 1)
# arith - add code to inject into mathematics (0 or 1)
# ctrl - add code to inject into control (0 or 1)
# stateFile - unique counter for fault site index;
#             should differ based on application
#
#####################################################
config = "jacobi.config"
funcList = "\"\""
prob = 1e-5
byte = -1
singleInj = 1
ptr = 1
arith = 1
ctrl = 1
stateFile = "jacobi"

############# Library Parameters #####################
#
# FLIPIT_PATH - Path to FlipIt repo
# SHOW - libraries and path wrapped by mpicc
#
#####################################################
import os
FLIPIT_PATH = os.environ['FLIPIT_PATH']
LLVM_BUILD_PATH = os.environ['LLVM_BUILD_PATH']
SHOW = ""

########### Files to NOT inject inside ###############
notInject = [" ", " "]

############ Default Compiler #################
cc = "mpicc"

############ Verbose compiler output ##############
verbose = False
[ [ [ 641, 647 ] ], [ [ 666, 674 ] ], [ [ 684, 688 ] ], [ [ 696, 700 ] ], [ [ 706, 715 ] ], [ [ 720, 723 ] ], [ [ 728, 733 ] ], [ [ 738, 742 ] ], [ [ 747, 756 ] ], [ [ 976, 978 ], [ 993, 995 ], [ 1038, 1040 ] ], [ [ 979, 990 ] ], [ [ 1020, 1035 ] ], [ [ 1069, 1073 ] ], [ [ 1136, 1145 ] ], [ [ 1208, 1210 ] ], [ [ 1274, 1281 ] ] ]
def num(a):
    # Return a plus the sum of a's decimal digits (a is a "generator" of the
    # returned value in self-number terminology). Valid for a < 10000.
    num = int(a)
    if (num < 10):
        return (num + num)
    elif (num < 100):
        return (num + num // 10 + num % 10)
    elif (num < 1000):
        return (num + num // 100 + ((num // 10) % 10) + num % 10)
    else:
        return (num + num // 1000 + ((num // 100) % 10) + ((num // 10) % 10) + num % 10)


# Sieve: mark every value below 10000 that some n can generate; whatever is
# left unmarked has no generator, i.e. it is a self number, and gets printed.
count = list(range(10000))
for i in range(10000):
    temp = num(i)
    if (temp >= 10000):
        continue
    else:
        count[temp] = -1
for i in range(10000):
    if (count[i] != -1):
        print(i)
[ [ [ 4, 7 ], [ 357, 360 ] ], [ [ 296, 301 ], [ 411, 416 ], [ 459, 464 ] ], [ [ 328, 329 ], [ 361, 362 ] ], [ [ 350, 354 ], [ 370, 374 ], [ 417, 421 ] ], [ [ 433, 434 ], [ 465, 466 ], [ 487, 488 ] ] ]
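What `num` computes is easy to misread from the branchy arithmetic: it returns a plus the digit sum of a, so the sieve crosses off every value that has a generator and prints the self numbers that remain. A quick cross-check against a string-based digit sum, assuming the `num` defined above is in scope:

# Cross-check (illustrative, not part of the original sample): num(a) should
# equal a + sum of a's decimal digits for every a the sieve visits.
def digit_sum(a):
    return sum(int(d) for d in str(a))

for a in (7, 75, 912, 9999):
    assert num(a) == a + digit_sum(a)
# e.g. num(75) == 75 + 7 + 5 == 87, so 87 gets crossed off and is not printed.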
# -*- coding: utf-8 -*- # PLEASE DO NOT EDIT THIS FILE, IT IS GENERATED AND WILL BE OVERWRITTEN: # https://github.com/ccxt/ccxt/blob/master/CONTRIBUTING.md#how-to-contribute-code from ccxt.base.exchange import Exchange from ccxt.base.errors import ExchangeError class virwox (Exchange): def describe(self): return self.deep_extend(super(virwox, self).describe(), { 'id': 'virwox', 'name': 'VirWoX', 'countries': ['AT', 'EU'], 'rateLimit': 1000, 'has': { 'CORS': True, }, 'urls': { 'logo': 'https://user-images.githubusercontent.com/1294454/27766894-6da9d360-5eea-11e7-90aa-41f2711b7405.jpg', 'api': { 'public': 'https://api.virwox.com/api/json.php', 'private': 'https://www.virwox.com/api/trading.php', }, 'www': 'https://www.virwox.com', 'doc': 'https://www.virwox.com/developers.php', }, 'requiredCredentials': { 'apiKey': True, 'secret': False, 'login': True, 'password': True, }, 'api': { 'public': { 'get': [ 'getInstruments', 'getBestPrices', 'getMarketDepth', 'estimateMarketOrder', 'getTradedPriceVolume', 'getRawTradeData', 'getStatistics', 'getTerminalList', 'getGridList', 'getGridStatistics', ], 'post': [ 'getInstruments', 'getBestPrices', 'getMarketDepth', 'estimateMarketOrder', 'getTradedPriceVolume', 'getRawTradeData', 'getStatistics', 'getTerminalList', 'getGridList', 'getGridStatistics', ], }, 'private': { 'get': [ 'cancelOrder', 'getBalances', 'getCommissionDiscount', 'getOrders', 'getTransactions', 'placeOrder', ], 'post': [ 'cancelOrder', 'getBalances', 'getCommissionDiscount', 'getOrders', 'getTransactions', 'placeOrder', ], }, }, }) def fetch_markets(self, params={}): markets = self.publicGetGetInstruments() keys = list(markets['result'].keys()) result = [] for p in range(0, len(keys)): market = markets['result'][keys[p]] id = market['instrumentID'] symbol = market['symbol'] base = market['longCurrency'] quote = market['shortCurrency'] result.append({ 'id': id, 'symbol': symbol, 'base': base, 'quote': quote, 'info': market, }) return result def fetch_balance(self, params={}): self.load_markets() response = self.privatePostGetBalances() balances = response['result']['accountList'] result = {'info': balances} for b in range(0, len(balances)): balance = balances[b] currency = balance['currency'] total = balance['balance'] account = { 'free': total, 'used': 0.0, 'total': total, } result[currency] = account return self.parse_balance(result) def fetch_market_price(self, symbol, params={}): self.load_markets() response = self.publicPostGetBestPrices(self.extend({ 'symbols': [symbol], }, params)) result = response['result'] return { 'bid': self.safe_float(result[0], 'bestBuyPrice'), 'ask': self.safe_float(result[0], 'bestSellPrice'), } def fetch_order_book(self, symbol, limit=None, params={}): self.load_markets() request = { 'symbols': [symbol], } if limit is not None: request['buyDepth'] = limit # 100 request['sellDepth'] = limit # 100 response = self.publicPostGetMarketDepth(self.extend(request, params)) orderbook = response['result'][0] return self.parse_order_book(orderbook, None, 'buy', 'sell', 'price', 'volume') def fetch_ticker(self, symbol, params={}): self.load_markets() end = self.milliseconds() start = end - 86400000 response = self.publicGetGetTradedPriceVolume(self.extend({ 'instrument': symbol, 'endDate': self.ymdhms(end), 'startDate': self.ymdhms(start), 'HLOC': 1, }, params)) tickers = response['result']['priceVolumeList'] keys = list(tickers.keys()) length = len(keys) lastKey = keys[length - 1] ticker = tickers[lastKey] timestamp = self.milliseconds() close = 
self.safe_float(ticker, 'close') return { 'symbol': symbol, 'timestamp': timestamp, 'datetime': self.iso8601(timestamp), 'high': self.safe_float(ticker, 'high'), 'low': self.safe_float(ticker, 'low'), 'bid': None, 'bidVolume': None, 'ask': None, 'askVolume': None, 'vwap': None, 'open': self.safe_float(ticker, 'open'), 'close': close, 'last': close, 'previousClose': None, 'change': None, 'percentage': None, 'average': None, 'baseVolume': self.safe_float(ticker, 'longVolume'), 'quoteVolume': self.safe_float(ticker, 'shortVolume'), 'info': ticker, } def parse_trade(self, trade, symbol=None): sec = self.safe_integer(trade, 'time') timestamp = sec * 1000 return { 'id': trade['tid'], 'timestamp': timestamp, 'datetime': self.iso8601(timestamp), 'order': None, 'symbol': symbol, 'type': None, 'side': None, 'price': self.safe_float(trade, 'price'), 'amount': self.safe_float(trade, 'vol'), 'fee': None, 'info': trade, } def fetch_trades(self, symbol, since=None, limit=None, params={}): self.load_markets() market = self.market(symbol) response = self.publicGetGetRawTradeData(self.extend({ 'instrument': symbol, 'timespan': 3600, }, params)) result = self.safe_value(response, 'result', {}) trades = self.safe_value(result, 'data', []) return self.parse_trades(trades, market) def create_order(self, symbol, type, side, amount, price=None, params={}): self.load_markets() market = self.market(symbol) request = { 'instrument': market['symbol'], 'orderType': side.upper(), 'amount': amount, } if type == 'limit': request['price'] = price response = self.privatePostPlaceOrder(self.extend(request, params)) return { 'info': response, 'id': self.safe_string(response['result'], 'orderID'), } def cancel_order(self, id, symbol=None, params={}): request = { 'orderID': id, } return self.privatePostCancelOrder(self.extend(request, params)) def sign(self, path, api='public', method='GET', params={}, headers=None, body=None): url = self.urls['api'][api] auth = {} if api == 'private': self.check_required_credentials() auth['key'] = self.apiKey auth['user'] = self.login auth['pass'] = self.password nonce = self.nonce() if method == 'GET': url += '?' + self.urlencode(self.extend({ 'method': path, 'id': nonce, }, auth, params)) else: headers = {'Content-Type': 'application/json'} body = self.json({ 'method': path, 'params': self.extend(auth, params), 'id': nonce, }) return {'url': url, 'method': method, 'body': body, 'headers': headers} def handle_errors(self, code, reason, url, method, headers, body, response): if code == 200: if (body[0] == '{') or (body[0] == '['): if 'result' in response: result = response['result'] if 'errorCode' in result: errorCode = result['errorCode'] if errorCode != 'OK': raise ExchangeError(self.id + ' error returned: ' + body) else: raise ExchangeError(self.id + ' malformed response: no result in response: ' + body) else: # if not a JSON response raise ExchangeError(self.id + ' returned a non-JSON reply: ' + body)
[ [ [ 212, 220 ], [ 280, 288 ] ], [ [ 250, 263 ], [ 9430, 9443 ], [ 9530, 9543 ], [ 9690, 9703 ] ], [ [ 272, 278 ], [ 354, 360 ] ] ]
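The class above follows ccxt's unified exchange interface, so it is driven like any other ccxt exchange. A hedged sketch is shown below; VirWoX has long since been delisted from ccxt, so this is for reading the code rather than live use, and the market used is simply the first one returned rather than a known symbol.

# Illustrative sketch: exercising the unified ccxt methods implemented above.
# The market symbol is taken from fetch_markets() rather than hard-coded.
import ccxt

exchange = ccxt.virwox()
markets = exchange.fetch_markets()              # wraps publicGetGetInstruments
symbol = markets[0]['symbol']
orderbook = exchange.fetch_order_book(symbol, limit=10)
ticker = exchange.fetch_ticker(symbol)
print(ticker['last'], len(orderbook['bids']))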
from marshmallow import fields

from .field_set import FieldSet, FieldSetSchema


class Destination(FieldSet):
    def __init__(self,
                 address: str = None,
                 bytes: int = None,
                 domain: str = None,
                 ip: str = None,
                 mac: str = None,
                 packets: int = None,
                 port: int = None,
                 *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.address = address
        self.bytes = bytes
        self.domain = domain
        self.ip = ip
        self.mac = mac
        self.packets = packets
        self.port = port


class DestinationSchema(FieldSetSchema):
    address = fields.String()
    bytes = fields.Integer()
    domain = fields.String()
    ip = fields.String()
    mac = fields.String()
    packets = fields.Integer()
    port = fields.Integer()
[ [ [ 24, 30 ], [ 707, 713 ], [ 735, 741 ], [ 765, 771 ], [ 790, 796 ], [ 816, 822 ], [ 846, 852 ], [ 874, 880 ] ], [ [ 55, 63 ], [ 100, 108 ] ], [ [ 65, 79 ], [ 676, 690 ] ], [ [ 88, 99 ] ], [ [ 658, 675 ] ] ]
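A minimal serialization sketch for the field set above, assuming `FieldSet.__init__` needs no arguments beyond those forwarded and that marshmallow 3.x is in use (where `dump()` returns a plain dict):

# Sketch only: serialize a Destination through its schema. Field names match
# the declarations above; fields left unset simply come back as None.
dest = Destination(ip='10.0.0.5', port=443, bytes=1024, packets=7)
data = DestinationSchema().dump(dest)
print(data)   # e.g. {'ip': '10.0.0.5', 'port': 443, 'bytes': 1024, 'packets': 7, ...}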
# Generated by Django 2.2.24 on 2021-08-24 10:53

from django.db import migrations, models


class Migration(migrations.Migration):

    dependencies = [
        ('vespawatch', '0052_auto_20210824_1131'),
    ]

    operations = [
        migrations.AddField(
            model_name='managementaction',
            name='result',
            field=models.CharField(choices=[('ST', 'Successfully treated'), ('UT', 'Unsuccessfully treated'), ('UN', 'Untreated'), ('UK', 'UK')], default='UK', max_length=3, verbose_name='Result'),
        ),
    ]
[ [ [ 72, 82 ], [ 109, 119 ], [ 239, 249 ] ], [ [ 84, 90 ], [ 348, 354 ] ], [ [ 99, 108 ] ] ]
# coding: utf-8 """ InsightVM API # Overview This guide documents the InsightVM Application Programming Interface (API) Version 3. This API supports the Representation State Transfer (REST) design pattern. Unless noted otherwise this API accepts and produces the `application/json` media type. This API uses Hypermedia as the Engine of Application State (HATEOAS) and is hypermedia friendly. All API connections must be made to the security console using HTTPS. ## Versioning Versioning is specified in the URL and the base path of this API is: `https://<host>:<port>/api/3/`. ## Specification An <a target=\"_blank\" href=\"https://github.com/OAI/OpenAPI-Specification/blob/master/versions/2.0.md\">OpenAPI v2</a> specification (also known as Swagger 2) of this API is available. Tools such as <a target=\"_blank\" href=\"https://github.com/swagger-api/swagger-codegen\">swagger-codegen</a> can be used to generate an API client in the language of your choosing using this specification document. <p class=\"openapi\">Download the specification: <a class=\"openapi-button\" target=\"_blank\" download=\"\" href=\"/api/3/json\"> Download </a></p> ## Authentication Authorization to the API uses HTTP Basic Authorization (see <a target=\"_blank\" href=\"https://www.ietf.org/rfc/rfc2617.txt\">RFC 2617</a> for more information). Requests must supply authorization credentials in the `Authorization` header using a Base64 encoded hash of `\"username:password\"`. <!-- ReDoc-Inject: <security-definitions> --> ### 2FA This API supports two-factor authentication (2FA) by supplying an authentication token in addition to the Basic Authorization. The token is specified using the `Token` request header. To leverage two-factor authentication, this must be enabled on the console and be configured for the account accessing the API. ## Resources ### Naming Resource names represent nouns and identify the entity being manipulated or accessed. All collection resources are pluralized to indicate to the client they are interacting with a collection of multiple resources of the same type. Singular resource names are used when there exists only one resource available to interact with. The following naming conventions are used by this API: | Type | Case | | --------------------------------------------- | ------------------------ | | Resource names | `lower_snake_case` | | Header, body, and query parameters parameters | `camelCase` | | JSON fields and property names | `camelCase` | #### Collections A collection resource is a parent resource for instance resources, but can itself be retrieved and operated on independently. Collection resources use a pluralized resource name. The resource path for collection resources follow the convention: ``` /api/3/{resource_name} ``` #### Instances An instance resource is a \"leaf\" level resource that may be retrieved, optionally nested within a collection resource. Instance resources are usually retrievable with opaque identifiers. The resource path for instance resources follows the convention: ``` /api/3/{resource_name}/{instance_id}... ``` ## Verbs The following HTTP operations are supported throughout this API. The general usage of the operation and both its failure and success status codes are outlined below. 
| Verb | Usage | Success | Failure | | --------- | ------------------------------------------------------------------------------------- | ----------- | -------------------------------------------------------------- | | `GET` | Used to retrieve a resource by identifier, or a collection of resources by type. | `200` | `400`, `401`, `402`, `404`, `405`, `408`, `410`, `415`, `500` | | `POST` | Creates a resource with an application-specified identifier. | `201` | `400`, `401`, `404`, `405`, `408`, `413`, `415`, `500` | | `POST` | Performs a request to queue an asynchronous job. | `202` | `400`, `401`, `405`, `408`, `410`, `413`, `415`, `500` | | `PUT` | Creates a resource with a client-specified identifier. | `200` | `400`, `401`, `403`, `405`, `408`, `410`, `413`, `415`, `500` | | `PUT` | Performs a full update of a resource with a specified identifier. | `201` | `400`, `401`, `403`, `405`, `408`, `410`, `413`, `415`, `500` | | `DELETE` | Deletes a resource by identifier or an entire collection of resources. | `204` | `400`, `401`, `405`, `408`, `410`, `413`, `415`, `500` | | `OPTIONS` | Requests what operations are available on a resource. | `200` | `401`, `404`, `405`, `408`, `500` | ### Common Operations #### OPTIONS All resources respond to the `OPTIONS` request, which allows discoverability of available operations that are supported. The `OPTIONS` response returns the acceptable HTTP operations on that resource within the `Allow` header. The response is always a `200 OK` status. ### Collection Resources Collection resources can support the `GET`, `POST`, `PUT`, and `DELETE` operations. #### GET The `GET` operation invoked on a collection resource indicates a request to retrieve all, or some, of the entities contained within the collection. This also includes the optional capability to filter or search resources during the request. The response from a collection listing is a paginated document. See [hypermedia links](#section/Overview/Paging) for more information. #### POST The `POST` is a non-idempotent operation that allows for the creation of a new resource when the resource identifier is not provided by the system during the creation operation (i.e. the Security Console generates the identifier). The content of the `POST` request is sent in the request body. The response to a successful `POST` request should be a `201 CREATED` with a valid `Location` header field set to the URI that can be used to access to the newly created resource. The `POST` to a collection resource can also be used to interact with asynchronous resources. In this situation, instead of a `201 CREATED` response, the `202 ACCEPTED` response indicates that processing of the request is not fully complete but has been accepted for future processing. This request will respond similarly with a `Location` header with link to the job-oriented asynchronous resource that was created and/or queued. #### PUT The `PUT` is an idempotent operation that either performs a create with user-supplied identity, or a full replace or update of a resource by a known identifier. The response to a `PUT` operation to create an entity is a `201 Created` with a valid `Location` header field set to the URI that can be used to access to the newly created resource. `PUT` on a collection resource replaces all values in the collection. The typical response to a `PUT` operation that updates an entity is hypermedia links, which may link to related resources caused by the side-effects of the changes performed. 
#### DELETE The `DELETE` is an idempotent operation that physically deletes a resource, or removes an association between resources. The typical response to a `DELETE` operation is hypermedia links, which may link to related resources caused by the side-effects of the changes performed. ### Instance Resources Instance resources can support the `GET`, `PUT`, `POST`, `PATCH` and `DELETE` operations. #### GET Retrieves the details of a specific resource by its identifier. The details retrieved can be controlled through property selection and property views. The content of the resource is returned within the body of the response in the acceptable media type. #### PUT Allows for and idempotent \"full update\" (complete replacement) on a specific resource. If the resource does not exist, it will be created; if it does exist, it is completely overwritten. Any omitted properties in the request are assumed to be undefined/null. For \"partial updates\" use `POST` or `PATCH` instead. The content of the `PUT` request is sent in the request body. The identifier of the resource is specified within the URL (not the request body). The response to a successful `PUT` request is a `201 CREATED` to represent the created status, with a valid `Location` header field set to the URI that can be used to access to the newly created (or fully replaced) resource. #### POST Performs a non-idempotent creation of a new resource. The `POST` of an instance resource most commonly occurs with the use of nested resources (e.g. searching on a parent collection resource). The response to a `POST` of an instance resource is typically a `200 OK` if the resource is non-persistent, and a `201 CREATED` if there is a resource created/persisted as a result of the operation. This varies by endpoint. #### PATCH The `PATCH` operation is used to perform a partial update of a resource. `PATCH` is a non-idempotent operation that enforces an atomic mutation of a resource. Only the properties specified in the request are to be overwritten on the resource it is applied to. If a property is missing, it is assumed to not have changed. #### DELETE Permanently removes the individual resource from the system. If the resource is an association between resources, only the association is removed, not the resources themselves. A successful deletion of the resource should return `204 NO CONTENT` with no response body. This operation is not fully idempotent, as follow-up requests to delete a non-existent resource should return a `404 NOT FOUND`. ## Requests Unless otherwise indicated, the default request body media type is `application/json`. ### Headers Commonly used request headers include: | Header | Example | Purpose | | ------------------ | --------------------------------------------- | ---------------------------------------------------------------------------------------------- | | `Accept` | `application/json` | Defines what acceptable content types are allowed by the client. For all types, use `*/*`. | | `Accept-Encoding` | `deflate, gzip` | Allows for the encoding to be specified (such as gzip). | | `Accept-Language` | `en-US` | Indicates to the server the client's locale (defaults `en-US`). | | `Authorization ` | `Basic Base64(\"username:password\")` | Basic authentication | | `Token ` | `123456` | Two-factor authentication token (if enabled) | ### Dates & Times Dates and/or times are specified as strings in the ISO 8601 format(s). 
The following formats are supported as input: | Value | Format | Notes | | --------------------------- | ------------------------------------------------------ | ----------------------------------------------------- | | Date | YYYY-MM-DD | Defaults to 12 am UTC (if used for a date & time | | Date & time only | YYYY-MM-DD'T'hh:mm:ss[.nnn] | Defaults to UTC | | Date & time in UTC | YYYY-MM-DD'T'hh:mm:ss[.nnn]Z | | | Date & time w/ offset | YYYY-MM-DD'T'hh:mm:ss[.nnn][+&#124;-]hh:mm | | | Date & time w/ zone-offset | YYYY-MM-DD'T'hh:mm:ss[.nnn][+&#124;-]hh:mm[<zone-id>] | | ### Timezones Timezones are specified in the regional zone format, such as `\"America/Los_Angeles\"`, `\"Asia/Tokyo\"`, or `\"GMT\"`. ### Paging Pagination is supported on certain collection resources using a combination of two query parameters, `page` and `size`. As these are control parameters, they are prefixed with the underscore character. The page parameter dictates the zero-based index of the page to retrieve, and the `size` indicates the size of the page. For example, `/resources?page=2&size=10` will return page 3, with 10 records per page, giving results 21-30. The maximum page size for a request is 500. ### Sorting Sorting is supported on paginated resources with the `sort` query parameter(s). The sort query parameter(s) supports identifying a single or multi-property sort with a single or multi-direction output. The format of the parameter is: ``` sort=property[,ASC|DESC]... ``` Therefore, the request `/resources?sort=name,title,DESC` would return the results sorted by the name and title descending, in that order. The sort directions are either ascending `ASC` or descending `DESC`. With single-order sorting, all properties are sorted in the same direction. To sort the results with varying orders by property, multiple sort parameters are passed. For example, the request `/resources?sort=name,ASC&sort=title,DESC` would sort by name ascending and title descending, in that order. ## Responses The following response statuses may be returned by this API. | Status | Meaning | Usage | | ------ | ------------------------ |------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | | `200` | OK | The operation performed without error according to the specification of the request, and no more specific 2xx code is suitable. | | `201` | Created | A create request has been fulfilled and a resource has been created. The resource is available as the URI specified in the response, including the `Location` header. | | `202` | Accepted | An asynchronous task has been accepted, but not guaranteed, to be processed in the future. | | `400` | Bad Request | The request was invalid or cannot be otherwise served. The request is not likely to succeed in the future without modifications. | | `401` | Unauthorized | The user is unauthorized to perform the operation requested, or does not maintain permissions to perform the operation on the resource specified. | | `403` | Forbidden | The resource exists to which the user has access, but the operating requested is not permitted. | | `404` | Not Found | The resource specified could not be located, does not exist, or an unauthenticated client does not have permissions to a resource. | | `405` | Method Not Allowed | The operations may not be performed on the specific resource. Allowed operations are returned and may be performed on the resource. 
| | `408` | Request Timeout | The client has failed to complete a request in a timely manner and the request has been discarded. | | `413` | Request Entity Too Large | The request being provided is too large for the server to accept processing. | | `415` | Unsupported Media Type | The media type is not supported for the requested resource. | | `500` | Internal Server Error | An internal and unexpected error has occurred on the server at no fault of the client. | ### Security The response statuses 401, 403 and 404 need special consideration for security purposes. As necessary, error statuses and messages may be obscured to strengthen security and prevent information exposure. The following is a guideline for privileged resource response statuses: | Use Case | Access | Resource | Permission | Status | | ------------------------------------------------------------------ | ------------------ |------------------- | ------------ | ------------ | | Unauthenticated access to an unauthenticated resource. | Unauthenticated | Unauthenticated | Yes | `20x` | | Unauthenticated access to an authenticated resource. | Unauthenticated | Authenticated | No | `401` | | Unauthenticated access to an authenticated resource. | Unauthenticated | Non-existent | No | `401` | | Authenticated access to a unauthenticated resource. | Authenticated | Unauthenticated | Yes | `20x` | | Authenticated access to an authenticated, unprivileged resource. | Authenticated | Authenticated | No | `404` | | Authenticated access to an authenticated, privileged resource. | Authenticated | Authenticated | Yes | `20x` | | Authenticated access to an authenticated, non-existent resource | Authenticated | Non-existent | Yes | `404` | ### Headers Commonly used response headers include: | Header | Example | Purpose | | -------------------------- | --------------------------------- | --------------------------------------------------------------- | | `Allow` | `OPTIONS, GET` | Defines the allowable HTTP operations on a resource. | | `Cache-Control` | `no-store, must-revalidate` | Disables caching of resources (as they are all dynamic). | | `Content-Encoding` | `gzip` | The encoding of the response body (if any). | | `Location` | | Refers to the URI of the resource created by a request. | | `Transfer-Encoding` | `chunked` | Specified the encoding used to transform response. | | `Retry-After` | 5000 | Indicates the time to wait before retrying a request. | | `X-Content-Type-Options` | `nosniff` | Disables MIME type sniffing. | | `X-XSS-Protection` | `1; mode=block` | Enables XSS filter protection. | | `X-Frame-Options` | `SAMEORIGIN` | Prevents rendering in a frame from a different origin. | | `X-UA-Compatible` | `IE=edge,chrome=1` | Specifies the browser mode to render in. | ### Format When `application/json` is returned in the response body it is always pretty-printed (indented, human readable output). Additionally, gzip compression/encoding is supported on all responses. #### Dates & Times Dates or times are returned as strings in the ISO 8601 'extended' format. When a date and time is returned (instant) the value is converted to UTC. For example: | Value | Format | Example | | --------------- | ------------------------------ | --------------------- | | Date | `YYYY-MM-DD` | 2017-12-03 | | Date & Time | `YYYY-MM-DD'T'hh:mm:ss[.nnn]Z` | 2017-12-03T10:15:30Z | #### Content In some resources a Content data type is used. This allows for multiple formats of representation to be returned within resource, specifically `\"html\"` and `\"text\"`. 
The `\"text\"` property returns a flattened representation suitable for output in textual displays. The `\"html\"` property returns an HTML fragment suitable for display within an HTML element. Note, the HTML returned is not a valid stand-alone HTML document. #### Paging The response to a paginated request follows the format: ```json { resources\": [ ... ], \"page\": { \"number\" : ..., \"size\" : ..., \"totalResources\" : ..., \"totalPages\" : ... }, \"links\": [ \"first\" : { \"href\" : \"...\" }, \"prev\" : { \"href\" : \"...\" }, \"self\" : { \"href\" : \"...\" }, \"next\" : { \"href\" : \"...\" }, \"last\" : { \"href\" : \"...\" } ] } ``` The `resources` property is an array of the resources being retrieved from the endpoint, each which should contain at minimum a \"self\" relation hypermedia link. The `page` property outlines the details of the current page and total possible pages. The object for the page includes the following properties: - number - The page number (zero-based) of the page returned. - size - The size of the pages, which is less than or equal to the maximum page size. - totalResources - The total amount of resources available across all pages. - totalPages - The total amount of pages. The last property of the paged response is the `links` array, which contains all available hypermedia links. For paginated responses, the \"self\", \"next\", \"previous\", \"first\", and \"last\" links are returned. The \"self\" link must always be returned and should contain a link to allow the client to replicate the original request against the collection resource in an identical manner to that in which it was invoked. The \"next\" and \"previous\" links are present if either or both there exists a previous or next page, respectively. The \"next\" and \"previous\" links have hrefs that allow \"natural movement\" to the next page, that is all parameters required to move the next page are provided in the link. The \"first\" and \"last\" links provide references to the first and last pages respectively. Requests outside the boundaries of the pageable will result in a `404 NOT FOUND`. Paginated requests do not provide a \"stateful cursor\" to the client, nor does it need to provide a read consistent view. Records in adjacent pages may change while pagination is being traversed, and the total number of pages and resources may change between requests within the same filtered/queries resource collection. #### Property Views The \"depth\" of the response of a resource can be configured using a \"view\". All endpoints supports two views that can tune the extent of the information returned in the resource. The supported views are `summary` and `details` (the default). View are specified using a query parameter, in this format: ```bash /<resource>?view={viewName} ``` #### Error Any error responses can provide a response body with a message to the client indicating more information (if applicable) to aid debugging of the error. All 40x and 50x responses will return an error response in the body. The format of the response is as follows: ```json { \"status\": <statusCode>, \"message\": <message>, \"links\" : [ { \"rel\" : \"...\", \"href\" : \"...\" } ] } ``` The `status` property is the same as the HTTP status returned in the response, to ease client parsing. The message property is a localized message in the request client's locale (if applicable) that articulates the nature of the error. The last property is the `links` property. 
This may contain additional [hypermedia links](#section/Overview/Authentication) to troubleshoot. #### Search Criteria <a section=\"section/Responses/SearchCriteria\"></a> Multiple resources make use of search criteria to match assets. Search criteria is an array of search filters. Each search filter has a generic format of: ```json { \"field\": \"<field-name>\", \"operator\": \"<operator>\", [\"value\": \"<value>\",] [\"lower\": \"<value>\",] [\"upper\": \"<value>\"] } ``` Every filter defines two required properties `field` and `operator`. The field is the name of an asset property that is being filtered on. The operator is a type and property-specific operating performed on the filtered property. The valid values for fields and operators are outlined in the table below. Every filter also defines one or more values that are supplied to the operator. The valid values vary by operator and are outlined below. ##### Fields The following table outlines the search criteria fields and the available operators: | Field | Operators | | --------------------------------- | ------------------------------------------------------------------------------------------------------------------------------ | | `alternate-address-type` | `in` | | `container-image` | `is` ` is-not` ` starts-with` ` ends-with` ` contains` ` does-not-contain` ` is-like` ` not-like` | | `container-status` | `is` ` is-not` | | `containers` | `are` | | `criticality-tag` | `is` ` is-not` ` is-greater-than` ` is-less-than` ` is-applied` ` is-not-applied` | | `custom-tag` | `is` ` is-not` ` starts-with` ` ends-with` ` contains` ` does-not-contain` ` is-applied` ` is-not-applied` | | `cve` | `is` ` is-not` ` contains` ` does-not-contain` | | `cvss-access-complexity` | `is` ` is-not` | | `cvss-authentication-required` | `is` ` is-not` | | `cvss-access-vector` | `is` ` is-not` | | `cvss-availability-impact` | `is` ` is-not` | | `cvss-confidentiality-impact` | `is` ` is-not` | | `cvss-integrity-impact` | `is` ` is-not` | | `cvss-v3-confidentiality-impact` | `is` ` is-not` | | `cvss-v3-integrity-impact` | `is` ` is-not` | | `cvss-v3-availability-impact` | `is` ` is-not` | | `cvss-v3-attack-vector` | `is` ` is-not` | | `cvss-v3-attack-complexity` | `is` ` is-not` | | `cvss-v3-user-interaction` | `is` ` is-not` | | `cvss-v3-privileges-required` | `is` ` is-not` | | `host-name` | `is` ` is-not` ` starts-with` ` ends-with` ` contains` ` does-not-contain` ` is-empty` ` is-not-empty` ` is-like` ` not-like` | | `host-type` | `in` ` not-in` | | `ip-address` | `is` ` is-not` ` in-range` ` not-in-range` ` is-like` ` not-like` | | `ip-address-type` | `in` ` not-in` | | `last-scan-date` | `is-on-or-before` ` is-on-or-after` ` is-between` ` is-earlier-than` ` is-within-the-last` | | `location-tag` | `is` ` is-not` ` starts-with` ` ends-with` ` contains` ` does-not-contain` ` is-applied` ` is-not-applied` | | `mobile-device-last-sync-time` | `is-within-the-last` ` is-earlier-than` | | `open-ports` | `is` ` is-not` ` in-range` | | `operating-system` | `contains` ` does-not-contain` ` is-empty` ` is-not-empty` | | `owner-tag` | `is` ` is-not` ` starts-with` ` ends-with` ` contains` ` does-not-contain` ` is-applied` ` is-not-applied` | | `pci-compliance` | `is` | | `risk-score` | `is` ` is-not` ` in-range` ` greater-than` ` less-than` | | `service-name` | `contains` ` does-not-contain` | | `site-id` | `in` ` not-in` | | `software` | `contains` ` does-not-contain` | | `vAsset-cluster` | `is` ` is-not` ` contains` ` does-not-contain` ` starts-with` | | 
`vAsset-datacenter` | `is` ` is-not` | | `vAsset-host-name` | `is` ` is-not` ` contains` ` does-not-contain` ` starts-with` | | `vAsset-power-state` | `in` ` not-in` | | `vAsset-resource-pool-path` | `contains` ` does-not-contain` | | `vulnerability-assessed` | `is-on-or-before` ` is-on-or-after` ` is-between` ` is-earlier-than` ` is-within-the-last` | | `vulnerability-category` | `is` ` is-not` ` starts-with` ` ends-with` ` contains` ` does-not-contain` | | `vulnerability-cvss-v3-score` | `is` ` is-not` | | `vulnerability-cvss-score` | `is` ` is-not` ` in-range` ` is-greater-than` ` is-less-than` | | `vulnerability-exposures` | `includes` ` does-not-include` | | `vulnerability-title` | `contains` ` does-not-contain` ` is` ` is-not` ` starts-with` ` ends-with` | | `vulnerability-validated-status` | `are` | ##### Enumerated Properties The following fields have enumerated values: | Field | Acceptable Values | | ----------------------------------------- | ------------------------------------------------------------------------------------------------------------- | | `alternate-address-type` | 0=IPv4, 1=IPv6 | | `containers` | 0=present, 1=not present | | `container-status` | `created` `running` `paused` `restarting` `exited` `dead` `unknown` | | `cvss-access-complexity` | <ul><li><code>L</code> = Low</li><li><code>M</code> = Medium</li><li><code>H</code> = High</li></ul> | | `cvss-integrity-impact` | <ul><li><code>N</code> = None</li><li><code>P</code> = Partial</li><li><code>C</code> = Complete</li></ul> | | `cvss-confidentiality-impact` | <ul><li><code>N</code> = None</li><li><code>P</code> = Partial</li><li><code>C</code> = Complete</li></ul> | | `cvss-availability-impact` | <ul><li><code>N</code> = None</li><li><code>P</code> = Partial</li><li><code>C</code> = Complete</li></ul> | | `cvss-access-vector` | <ul><li><code>L</code> = Local</li><li><code>A</code> = Adjacent</li><li><code>N</code> = Network</li></ul> | | `cvss-authentication-required` | <ul><li><code>N</code> = None</li><li><code>S</code> = Single</li><li><code>M</code> = Multiple</li></ul> | | `cvss-v3-confidentiality-impact` | <ul><li><code>L</code> = Local</li><li><code>L</code> = Low</li><li><code>N</code> = None</li><li><code>H</code> = High</li></ul> | | `cvss-v3-integrity-impact` | <ul><li><code>L</code> = Local</li><li><code>L</code> = Low</li><li><code>N</code> = None</li><li><code>H</code> = High</li></ul> | | `cvss-v3-availability-impact` | <ul><li><code>N</code> = None</li><li><code>L</code> = Low</li><li><code>H</code> = High</li></ul> | | `cvss-v3-attack-vector` | <ul><li><code>N</code> = Network</li><li><code>A</code> = Adjacent</li><li><code>L</code> = Local</li><li><code>P</code> = Physical</li></ul> | | `cvss-v3-attack-complexity` | <ul><li><code>L</code> = Low</li><li><code>H</code> = High</li></ul> | | `cvss-v3-user-interaction` | <ul><li><code>N</code> = None</li><li><code>R</code> = Required</li></ul> | | `cvss-v3-privileges-required` | <ul><li><code>N</code> = None</li><li><code>L</code> = Low</li><li><code>H</code> = High</li></ul> | | `host-type` | 0=Unknown, 1=Guest, 2=Hypervisor, 3=Physical, 4=Mobile | | `ip-address-type` | 0=IPv4, 1=IPv6 | | `pci-compliance` | 0=fail, 1=pass | | `vulnerability-validated-status` | 0=present, 1=not present | ##### Operator Properties <a section=\"section/Responses/SearchCriteria/OperatorProperties\"></a> The following table outlines which properties are required for each operator and the appropriate data type(s): | Operator | `value` | `lower` | `upper` | | 
----------------------|-----------------------|-----------------------|-----------------------| | `are` | `string` | | | | `contains` | `string` | | | | `does-not-contain` | `string` | | | | `ends with` | `string` | | | | `in` | `Array[ string ]` | | | | `in-range` | | `numeric` | `numeric` | | `includes` | `Array[ string ]` | | | | `is` | `string` | | | | `is-applied` | | | | | `is-between` | | `numeric` | `numeric` | | `is-earlier-than` | `numeric` | | | | `is-empty` | | | | | `is-greater-than` | `numeric` | | | | `is-on-or-after` | `string` (yyyy-MM-dd) | | | | `is-on-or-before` | `string` (yyyy-MM-dd) | | | | `is-not` | `string` | | | | `is-not-applied` | | | | | `is-not-empty` | | | | | `is-within-the-last` | `numeric` | | | | `less-than` | `string` | | | | `like` | `string` | | | | `not-contains` | `string` | | | | `not-in` | `Array[ string ]` | | | | `not-in-range` | | `numeric` | `numeric` | | `not-like` | `string` | | | | `starts-with` | `string` | | | #### Discovery Connection Search Criteria <a section=\"section/Responses/DiscoverySearchCriteria\"></a> Dynamic sites make use of search criteria to match assets from a discovery connection. Search criteria is an array of search filters. Each search filter has a generic format of: ```json { \"field\": \"<field-name>\", \"operator\": \"<operator>\", [\"value\": \"<value>\",] [\"lower\": \"<value>\",] [\"upper\": \"<value>\"] } ``` Every filter defines two required properties `field` and `operator`. The field is the name of an asset property that is being filtered on. The list of supported fields vary depending on the type of discovery connection configured for the dynamic site (e.g vSphere, ActiveSync, etc.). The operator is a type and property-specific operating performed on the filtered property. The valid values for fields outlined in the tables below and are grouped by the type of connection. Every filter also defines one or more values that are supplied to the operator. See <a href=\"#section/Responses/SearchCriteria/OperatorProperties\">Search Criteria Operator Properties</a> for more information on the valid values for each operator. ##### Fields (ActiveSync) This section documents search criteria information for ActiveSync discovery connections. The discovery connections must be one of the following types: `\"activesync-ldap\"`, `\"activesync-office365\"`, or `\"activesync-powershell\"`. The following table outlines the search criteria fields and the available operators for ActiveSync connections: | Field | Operators | | --------------------------------- | ------------------------------------------------------------- | | `last-sync-time` | `is-within-the-last` ` is-earlier-than` | | `operating-system` | `contains` ` does-not-contain` | | `user` | `is` ` is-not` ` contains` ` does-not-contain` ` starts-with` | ##### Fields (AWS) This section documents search criteria information for AWS discovery connections. The discovery connections must be the type `\"aws\"`. 
The following table outlines the search criteria fields and the available operators for AWS connections: | Field | Operators | | ----------------------- | ------------------------------------------------------------- | | `availability-zone` | `contains` ` does-not-contain` | | `guest-os-family` | `contains` ` does-not-contain` | | `instance-id` | `contains` ` does-not-contain` | | `instance-name` | `is` ` is-not` ` contains` ` does-not-contain` ` starts-with` | | `instance-state` | `in` ` not-in` | | `instance-type` | `in` ` not-in` | | `ip-address` | `in-range` ` not-in-range` ` is` ` is-not` | | `region` | `in` ` not-in` | | `vpc-id` | `is` ` is-not` ` contains` ` does-not-contain` ` starts-with` | ##### Fields (DHCP) This section documents search criteria information for DHCP discovery connections. The discovery connections must be the type `\"dhcp\"`. The following table outlines the search criteria fields and the available operators for DHCP connections: | Field | Operators | | --------------- | ------------------------------------------------------------- | | `host-name` | `is` ` is-not` ` contains` ` does-not-contain` ` starts-with` | | `ip-address` | `in-range` ` not-in-range` ` is` ` is-not` | | `mac-address` | `is` ` is-not` ` contains` ` does-not-contain` ` starts-with` | ##### Fields (Sonar) This section documents search criteria information for Sonar discovery connections. The discovery connections must be the type `\"sonar\"`. The following table outlines the search criteria fields and the available operators for Sonar connections: | Field | Operators | | ------------------- | -------------------- | | `search-domain` | `contains` ` is` | | `ip-address` | `in-range` ` is` | | `sonar-scan-date` | `is-within-the-last` | ##### Fields (vSphere) This section documents search criteria information for vSphere discovery connections. The discovery connections must be the type `\"vsphere\"`. The following table outlines the search criteria fields and the available operators for vSphere connections: | Field | Operators | | -------------------- | ------------------------------------------------------------------------------------------ | | `cluster` | `is` ` is-not` ` contains` ` does-not-contain` ` starts-with` | | `data-center` | `is` ` is-not` | | `discovered-time` | `is-on-or-before` ` is-on-or-after` ` is-between` ` is-earlier-than` ` is-within-the-last` | | `guest-os-family` | `contains` ` does-not-contain` | | `host-name` | `is` ` is-not` ` contains` ` does-not-contain` ` starts-with` | | `ip-address` | `in-range` ` not-in-range` ` is` ` is-not` | | `power-state` | `in` ` not-in` | | `resource-pool-path` | `contains` ` does-not-contain` | | `last-time-seen` | `is-on-or-before` ` is-on-or-after` ` is-between` ` is-earlier-than` ` is-within-the-last` | | `vm` | `is` ` is-not` ` contains` ` does-not-contain` ` starts-with` | ##### Enumerated Properties (vSphere) The following fields have enumerated values: | Field | Acceptable Values | | ------------- | ------------------------------------ | | `power-state` | `poweredOn` `poweredOff` `suspended` | ## HATEOAS This API follows Hypermedia as the Engine of Application State (HATEOAS) principals and is therefore hypermedia friendly. Hyperlinks are returned in the `links` property of any given resource and contain a fully-qualified hyperlink to the corresponding resource. 
The format of the hypermedia link adheres to both the <a target=\"_blank\" href=\"http://jsonapi.org\">{json:api} v1</a> <a target=\"_blank\" href=\"http://jsonapi.org/format/#document-links\">\"Link Object\"</a> and <a target=\"_blank\" href=\"http://json-schema.org/latest/json-schema-hypermedia.html\">JSON Hyper-Schema</a> <a target=\"_blank\" href=\"http://json-schema.org/latest/json-schema-hypermedia.html#rfc.section.5.2\">\"Link Description Object\"</a> formats. For example: ```json \"links\": [{ \"rel\": \"<relation>\", \"href\": \"<href>\" ... }] ``` Where appropriate link objects may also contain additional properties than the `rel` and `href` properties, such as `id`, `type`, etc. See the [Root](#tag/Root) resources for the entry points into API discovery. # noqa: E501 OpenAPI spec version: 3 Contact: support@rapid7.com Generated by: https://github.com/swagger-api/swagger-codegen.git """ import pprint import re # noqa: F401 import six from py_insightvm_sdk.models.link import Link # noqa: F401,E501 from py_insightvm_sdk.models.vulnerability_validation_resource import VulnerabilityValidationResource # noqa: F401,E501 class ResourcesVulnerabilityValidationResource(object): """NOTE: This class is auto generated by the swagger code generator program. Do not edit the class manually. """ """ Attributes: swagger_types (dict): The key is attribute name and the value is attribute type. attribute_map (dict): The key is attribute name and the value is json key in definition. """ swagger_types = { 'links': 'list[Link]', 'resources': 'list[VulnerabilityValidationResource]' } attribute_map = { 'links': 'links', 'resources': 'resources' } def __init__(self, links=None, resources=None): # noqa: E501 """ResourcesVulnerabilityValidationResource - a model defined in Swagger""" # noqa: E501 self._links = None self._resources = None self.discriminator = None if links is not None: self.links = links if resources is not None: self.resources = resources @property def links(self): """Gets the links of this ResourcesVulnerabilityValidationResource. # noqa: E501 Hypermedia links to corresponding or related resources. # noqa: E501 :return: The links of this ResourcesVulnerabilityValidationResource. # noqa: E501 :rtype: list[Link] """ return self._links @links.setter def links(self, links): """Sets the links of this ResourcesVulnerabilityValidationResource. Hypermedia links to corresponding or related resources. # noqa: E501 :param links: The links of this ResourcesVulnerabilityValidationResource. # noqa: E501 :type: list[Link] """ self._links = links @property def resources(self): """Gets the resources of this ResourcesVulnerabilityValidationResource. # noqa: E501 The resources returned. # noqa: E501 :return: The resources of this ResourcesVulnerabilityValidationResource. # noqa: E501 :rtype: list[VulnerabilityValidationResource] """ return self._resources @resources.setter def resources(self, resources): """Sets the resources of this ResourcesVulnerabilityValidationResource. The resources returned. # noqa: E501 :param resources: The resources of this ResourcesVulnerabilityValidationResource. 
# noqa: E501 :type: list[VulnerabilityValidationResource] """ self._resources = resources def to_dict(self): """Returns the model properties as a dict""" result = {} for attr, _ in six.iteritems(self.swagger_types): value = getattr(self, attr) if isinstance(value, list): result[attr] = list(map( lambda x: x.to_dict() if hasattr(x, "to_dict") else x, value )) elif hasattr(value, "to_dict"): result[attr] = value.to_dict() elif isinstance(value, dict): result[attr] = dict(map( lambda item: (item[0], item[1].to_dict()) if hasattr(item[1], "to_dict") else item, value.items() )) else: result[attr] = value if issubclass(ResourcesVulnerabilityValidationResource, dict): for key, value in self.items(): result[key] = value return result def to_str(self): """Returns the string representation of the model""" return pprint.pformat(self.to_dict()) def __repr__(self): """For `print` and `pprint`""" return self.to_str() def __eq__(self, other): """Returns true if both objects are equal""" if not isinstance(other, ResourcesVulnerabilityValidationResource): return False return self.__dict__ == other.__dict__ def __ne__(self, other): """Returns true if both objects are not equal""" return not self == other
[ [ [ 48229, 48235 ], [ 52087, 52093 ] ], [ [ 48243, 48245 ] ], [ [ 48268, 48271 ], [ 51132, 51135 ] ], [ [ 48314, 48318 ] ], [ [ 48408, 48439 ] ], [ [ 48467, 48507 ], [ 51836, 51876 ], [ 52327, 52367 ] ] ]
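A minimal usage sketch for the generated model above. It assumes the `ResourcesVulnerabilityValidationResource` class defined here is already in scope, and plain dicts stand in for `Link` and `VulnerabilityValidationResource` instances purely for illustration:

# Illustration only: construct the swagger-generated model and round-trip it
# through to_dict(). Plain dicts are stand-ins for Link / VulnerabilityValidationResource.
page = ResourcesVulnerabilityValidationResource(
    links=[{"rel": "self", "href": "https://insightvm.example/api/3/vulnerability_validations"}],
    resources=[],
)
print(page.links)      # the list handed to the constructor
print(page.to_dict())  # {'links': [{'rel': 'self', 'href': '...'}], 'resources': []}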
# logcall.py

from functools import wraps

def logged(func):
    # Idea: Give me a function, I'll put logging
    # around it
    print('Adding logging to', func.__name__)
    @wraps(func)
    def wrapper(*args, **kwargs):
        print('You called', func.__name__)
        return func(*args, **kwargs)
    return wrapper
[ [ [ 36, 41 ], [ 179, 184 ] ], [ [ 47, 53 ] ] ]
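A short usage sketch for the `logged` decorator above; the `add` function is just an illustrative stand-in:

# logcall.py is the module defined above; decorating a function prints a message
# at definition time, and the wrapper logs every call before delegating.
from logcall import logged

@logged
def add(x, y):            # prints "Adding logging to add" when defined
    return x + y

print(add(2, 3))          # prints "You called add", then 5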
from sympy.utilities.pytest import XFAIL, raises from sympy import (S, Symbol, symbols, nan, oo, I, pi, Float, And, Or, Not, Implies, Xor, zoo, sqrt, Rational, simplify, Function) from sympy.core.compatibility import range from sympy.core.relational import (Relational, Equality, Unequality, GreaterThan, LessThan, StrictGreaterThan, StrictLessThan, Rel, Eq, Lt, Le, Gt, Ge, Ne) from sympy.sets.sets import Interval, FiniteSet x, y, z, t = symbols('x,y,z,t') def test_rel_ne(): assert Relational(x, y, '!=') == Ne(x, y) def test_rel_subs(): e = Relational(x, y, '==') e = e.subs(x, z) assert isinstance(e, Equality) assert e.lhs == z assert e.rhs == y e = Relational(x, y, '>=') e = e.subs(x, z) assert isinstance(e, GreaterThan) assert e.lhs == z assert e.rhs == y e = Relational(x, y, '<=') e = e.subs(x, z) assert isinstance(e, LessThan) assert e.lhs == z assert e.rhs == y e = Relational(x, y, '>') e = e.subs(x, z) assert isinstance(e, StrictGreaterThan) assert e.lhs == z assert e.rhs == y e = Relational(x, y, '<') e = e.subs(x, z) assert isinstance(e, StrictLessThan) assert e.lhs == z assert e.rhs == y e = Eq(x, 0) assert e.subs(x, 0) is S.true assert e.subs(x, 1) is S.false def test_wrappers(): e = x + x**2 res = Relational(y, e, '==') assert Rel(y, x + x**2, '==') == res assert Eq(y, x + x**2) == res res = Relational(y, e, '<') assert Lt(y, x + x**2) == res res = Relational(y, e, '<=') assert Le(y, x + x**2) == res res = Relational(y, e, '>') assert Gt(y, x + x**2) == res res = Relational(y, e, '>=') assert Ge(y, x + x**2) == res res = Relational(y, e, '!=') assert Ne(y, x + x**2) == res def test_Eq(): assert Eq(x**2) == Eq(x**2, 0) assert Eq(x**2) != Eq(x**2, 1) assert Eq(x, x) # issue 5719 # issue 6116 p = Symbol('p', positive=True) assert Eq(p, 0) is S.false def test_rel_Infinity(): # NOTE: All of these are actually handled by sympy.core.Number, and do # not create Relational objects. 
assert (oo > oo) is S.false assert (oo > -oo) is S.true assert (oo > 1) is S.true assert (oo < oo) is S.false assert (oo < -oo) is S.false assert (oo < 1) is S.false assert (oo >= oo) is S.true assert (oo >= -oo) is S.true assert (oo >= 1) is S.true assert (oo <= oo) is S.true assert (oo <= -oo) is S.false assert (oo <= 1) is S.false assert (-oo > oo) is S.false assert (-oo > -oo) is S.false assert (-oo > 1) is S.false assert (-oo < oo) is S.true assert (-oo < -oo) is S.false assert (-oo < 1) is S.true assert (-oo >= oo) is S.false assert (-oo >= -oo) is S.true assert (-oo >= 1) is S.false assert (-oo <= oo) is S.true assert (-oo <= -oo) is S.true assert (-oo <= 1) is S.true def test_bool(): assert Eq(0, 0) is S.true assert Eq(1, 0) is S.false assert Ne(0, 0) is S.false assert Ne(1, 0) is S.true assert Lt(0, 1) is S.true assert Lt(1, 0) is S.false assert Le(0, 1) is S.true assert Le(1, 0) is S.false assert Le(0, 0) is S.true assert Gt(1, 0) is S.true assert Gt(0, 1) is S.false assert Ge(1, 0) is S.true assert Ge(0, 1) is S.false assert Ge(1, 1) is S.true assert Eq(I, 2) is S.false assert Ne(I, 2) is S.true raises(TypeError, lambda: Gt(I, 2)) raises(TypeError, lambda: Ge(I, 2)) raises(TypeError, lambda: Lt(I, 2)) raises(TypeError, lambda: Le(I, 2)) a = Float('.000000000000000000001', '') b = Float('.0000000000000000000001', '') assert Eq(pi + a, pi + b) is S.false def test_rich_cmp(): assert (x < y) == Lt(x, y) assert (x <= y) == Le(x, y) assert (x > y) == Gt(x, y) assert (x >= y) == Ge(x, y) def test_doit(): from sympy import Symbol p = Symbol('p', positive=True) n = Symbol('n', negative=True) np = Symbol('np', nonpositive=True) nn = Symbol('nn', nonnegative=True) assert Gt(p, 0).doit() is S.true assert Gt(p, 1).doit() == Gt(p, 1) assert Ge(p, 0).doit() is S.true assert Le(p, 0).doit() is S.false assert Lt(n, 0).doit() is S.true assert Le(np, 0).doit() is S.true assert Gt(nn, 0).doit() == Gt(nn, 0) assert Lt(nn, 0).doit() is S.false assert Eq(x, 0).doit() == Eq(x, 0) def test_new_relational(): x = Symbol('x') assert Eq(x) == Relational(x, 0) # None ==> Equality assert Eq(x) == Relational(x, 0, '==') assert Eq(x) == Relational(x, 0, 'eq') assert Eq(x) == Equality(x, 0) assert Eq(x, -1) == Relational(x, -1) # None ==> Equality assert Eq(x, -1) == Relational(x, -1, '==') assert Eq(x, -1) == Relational(x, -1, 'eq') assert Eq(x, -1) == Equality(x, -1) assert Eq(x) != Relational(x, 1) # None ==> Equality assert Eq(x) != Relational(x, 1, '==') assert Eq(x) != Relational(x, 1, 'eq') assert Eq(x) != Equality(x, 1) assert Eq(x, -1) != Relational(x, 1) # None ==> Equality assert Eq(x, -1) != Relational(x, 1, '==') assert Eq(x, -1) != Relational(x, 1, 'eq') assert Eq(x, -1) != Equality(x, 1) assert Ne(x, 0) == Relational(x, 0, '!=') assert Ne(x, 0) == Relational(x, 0, '<>') assert Ne(x, 0) == Relational(x, 0, 'ne') assert Ne(x, 0) == Unequality(x, 0) assert Ne(x, 0) != Relational(x, 1, '!=') assert Ne(x, 0) != Relational(x, 1, '<>') assert Ne(x, 0) != Relational(x, 1, 'ne') assert Ne(x, 0) != Unequality(x, 1) assert Ge(x, 0) == Relational(x, 0, '>=') assert Ge(x, 0) == Relational(x, 0, 'ge') assert Ge(x, 0) == GreaterThan(x, 0) assert Ge(x, 1) != Relational(x, 0, '>=') assert Ge(x, 1) != Relational(x, 0, 'ge') assert Ge(x, 1) != GreaterThan(x, 0) assert (x >= 1) == Relational(x, 1, '>=') assert (x >= 1) == Relational(x, 1, 'ge') assert (x >= 1) == GreaterThan(x, 1) assert (x >= 0) != Relational(x, 1, '>=') assert (x >= 0) != Relational(x, 1, 'ge') assert (x >= 0) != GreaterThan(x, 1) assert Le(x, 0) 
== Relational(x, 0, '<=') assert Le(x, 0) == Relational(x, 0, 'le') assert Le(x, 0) == LessThan(x, 0) assert Le(x, 1) != Relational(x, 0, '<=') assert Le(x, 1) != Relational(x, 0, 'le') assert Le(x, 1) != LessThan(x, 0) assert (x <= 1) == Relational(x, 1, '<=') assert (x <= 1) == Relational(x, 1, 'le') assert (x <= 1) == LessThan(x, 1) assert (x <= 0) != Relational(x, 1, '<=') assert (x <= 0) != Relational(x, 1, 'le') assert (x <= 0) != LessThan(x, 1) assert Gt(x, 0) == Relational(x, 0, '>') assert Gt(x, 0) == Relational(x, 0, 'gt') assert Gt(x, 0) == StrictGreaterThan(x, 0) assert Gt(x, 1) != Relational(x, 0, '>') assert Gt(x, 1) != Relational(x, 0, 'gt') assert Gt(x, 1) != StrictGreaterThan(x, 0) assert (x > 1) == Relational(x, 1, '>') assert (x > 1) == Relational(x, 1, 'gt') assert (x > 1) == StrictGreaterThan(x, 1) assert (x > 0) != Relational(x, 1, '>') assert (x > 0) != Relational(x, 1, 'gt') assert (x > 0) != StrictGreaterThan(x, 1) assert Lt(x, 0) == Relational(x, 0, '<') assert Lt(x, 0) == Relational(x, 0, 'lt') assert Lt(x, 0) == StrictLessThan(x, 0) assert Lt(x, 1) != Relational(x, 0, '<') assert Lt(x, 1) != Relational(x, 0, 'lt') assert Lt(x, 1) != StrictLessThan(x, 0) assert (x < 1) == Relational(x, 1, '<') assert (x < 1) == Relational(x, 1, 'lt') assert (x < 1) == StrictLessThan(x, 1) assert (x < 0) != Relational(x, 1, '<') assert (x < 0) != Relational(x, 1, 'lt') assert (x < 0) != StrictLessThan(x, 1) # finally, some fuzz testing from random import randint from sympy.core.compatibility import unichr for i in range(100): while 1: strtype, length = (unichr, 65535) if randint(0, 1) else (chr, 255) relation_type = strtype(randint(0, length)) if randint(0, 1): relation_type += strtype(randint(0, length)) if relation_type not in ('==', 'eq', '!=', '<>', 'ne', '>=', 'ge', '<=', 'le', '>', 'gt', '<', 'lt'): break raises(ValueError, lambda: Relational(x, 1, relation_type)) def test_relational_bool_output(): # https://github.com/sympy/sympy/issues/5931 raises(TypeError, lambda: bool(x > 3)) raises(TypeError, lambda: bool(x >= 3)) raises(TypeError, lambda: bool(x < 3)) raises(TypeError, lambda: bool(x <= 3)) raises(TypeError, lambda: bool(Eq(x, 3))) raises(TypeError, lambda: bool(Ne(x, 3))) def test_relational_logic_symbols(): # See issue 6204 assert (x < y) & (z < t) == And(x < y, z < t) assert (x < y) | (z < t) == Or(x < y, z < t) assert ~(x < y) == Not(x < y) assert (x < y) >> (z < t) == Implies(x < y, z < t) assert (x < y) << (z < t) == Implies(z < t, x < y) assert (x < y) ^ (z < t) == Xor(x < y, z < t) assert isinstance((x < y) & (z < t), And) assert isinstance((x < y) | (z < t), Or) assert isinstance(~(x < y), GreaterThan) assert isinstance((x < y) >> (z < t), Implies) assert isinstance((x < y) << (z < t), Implies) assert isinstance((x < y) ^ (z < t), (Or, Xor)) def test_univariate_relational_as_set(): assert (x > 0).as_set() == Interval(0, oo, True, True) assert (x >= 0).as_set() == Interval(0, oo) assert (x < 0).as_set() == Interval(-oo, 0, True, True) assert (x <= 0).as_set() == Interval(-oo, 0) assert Eq(x, 0).as_set() == FiniteSet(0) assert Ne(x, 0).as_set() == Interval(-oo, 0, True, True) + \ Interval(0, oo, True, True) assert (x**2 >= 4).as_set() == Interval(-oo, -2) + Interval(2, oo) @XFAIL def test_multivariate_relational_as_set(): assert (x*y >= 0).as_set() == Interval(0, oo)*Interval(0, oo) + \ Interval(-oo, 0)*Interval(-oo, 0) def test_Not(): assert Not(Equality(x, y)) == Unequality(x, y) assert Not(Unequality(x, y)) == Equality(x, y) assert Not(StrictGreaterThan(x, 
y)) == LessThan(x, y) assert Not(StrictLessThan(x, y)) == GreaterThan(x, y) assert Not(GreaterThan(x, y)) == StrictLessThan(x, y) assert Not(LessThan(x, y)) == StrictGreaterThan(x, y) def test_evaluate(): assert str(Eq(x, x, evaluate=False)) == 'Eq(x, x)' assert Eq(x, x, evaluate=False).doit() == S.true assert str(Ne(x, x, evaluate=False)) == 'Ne(x, x)' assert Ne(x, x, evaluate=False).doit() == S.false assert str(Ge(x, x, evaluate=False)) == 'x >= x' assert str(Le(x, x, evaluate=False)) == 'x <= x' assert str(Gt(x, x, evaluate=False)) == 'x > x' assert str(Lt(x, x, evaluate=False)) == 'x < x' def assert_all_ineq_raise_TypeError(a, b): raises(TypeError, lambda: a > b) raises(TypeError, lambda: a >= b) raises(TypeError, lambda: a < b) raises(TypeError, lambda: a <= b) raises(TypeError, lambda: b > a) raises(TypeError, lambda: b >= a) raises(TypeError, lambda: b < a) raises(TypeError, lambda: b <= a) def assert_all_ineq_give_class_Inequality(a, b): """All inequality operations on `a` and `b` result in class Inequality.""" from sympy.core.relational import _Inequality as Inequality assert isinstance(a > b, Inequality) assert isinstance(a >= b, Inequality) assert isinstance(a < b, Inequality) assert isinstance(a <= b, Inequality) assert isinstance(b > a, Inequality) assert isinstance(b >= a, Inequality) assert isinstance(b < a, Inequality) assert isinstance(b <= a, Inequality) def test_imaginary_compare_raises_TypeError(): # See issue #5724 assert_all_ineq_raise_TypeError(I, x) def test_complex_compare_not_real(): # two cases which are not real y = Symbol('y', imaginary=True) z = Symbol('z', complex=True, real=False) for w in (y, z): assert_all_ineq_raise_TypeError(2, w) # some cases which should remain un-evaluated t = Symbol('t') x = Symbol('x', real=True) z = Symbol('z', complex=True) for w in (x, z, t): assert_all_ineq_give_class_Inequality(2, w) def test_imaginary_and_inf_compare_raises_TypeError(): # See pull request #7835 y = Symbol('y', imaginary=True) assert_all_ineq_raise_TypeError(oo, y) assert_all_ineq_raise_TypeError(-oo, y) def test_complex_pure_imag_not_ordered(): raises(TypeError, lambda: 2*I < 3*I) # more generally x = Symbol('x', real=True, nonzero=True) y = Symbol('y', imaginary=True) z = Symbol('z', complex=True) assert_all_ineq_raise_TypeError(I, y) t = I*x # an imaginary number, should raise errors assert_all_ineq_raise_TypeError(2, t) t = -I*y # a real number, so no errors assert_all_ineq_give_class_Inequality(2, t) t = I*z # unknown, should be unevaluated assert_all_ineq_give_class_Inequality(2, t) def test_x_minus_y_not_same_as_x_lt_y(): """ A consequence of pull request #7792 is that `x - y < 0` and `x < y` are not synonymous. 
""" x = I + 2 y = I + 3 raises(TypeError, lambda: x < y) assert x - y < 0 ineq = Lt(x, y, evaluate=False) raises(TypeError, lambda: ineq.doit()) assert ineq.lhs - ineq.rhs < 0 t = Symbol('t', imaginary=True) x = 2 + t y = 3 + t ineq = Lt(x, y, evaluate=False) raises(TypeError, lambda: ineq.doit()) assert ineq.lhs - ineq.rhs < 0 # this one should give error either way x = I + 2 y = 2*I + 3 raises(TypeError, lambda: x < y) raises(TypeError, lambda: x - y < 0) def test_nan_equality_exceptions(): # See issue #7774 import random assert Equality(nan, nan) is S.false assert Unequality(nan, nan) is S.true # See issue #7773 A = (x, S(0), S(1)/3, pi, oo, -oo) assert Equality(nan, random.choice(A)) is S.false assert Equality(random.choice(A), nan) is S.false assert Unequality(nan, random.choice(A)) is S.true assert Unequality(random.choice(A), nan) is S.true def test_nan_inequality_raise_errors(): # See discussion in pull request #7776. We test inequalities with # a set including examples of various classes. for q in (x, S(0), S(10), S(1)/3, pi, S(1.3), oo, -oo, nan): assert_all_ineq_raise_TypeError(q, nan) def test_nan_complex_inequalities(): # Comparisons of NaN with non-real raise errors, we're not too # fussy whether its the NaN error or complex error. for r in (I, zoo, Symbol('z', imaginary=True)): assert_all_ineq_raise_TypeError(r, nan) def test_complex_infinity_inequalities(): raises(TypeError, lambda: zoo > 0) raises(TypeError, lambda: zoo >= 0) raises(TypeError, lambda: zoo < 0) raises(TypeError, lambda: zoo <= 0) def test_inequalities_symbol_name_same(): """Using the operator and functional forms should give same results.""" # We test all combinations from a set # FIXME: could replace with random selection after test passes A = (x, y, S(0), S(1)/3, pi, oo, -oo) for a in A: for b in A: assert Gt(a, b) == (a > b) assert Lt(a, b) == (a < b) assert Ge(a, b) == (a >= b) assert Le(a, b) == (a <= b) for b in (y, S(0), S(1)/3, pi, oo, -oo): assert Gt(x, b, evaluate=False) == (x > b) assert Lt(x, b, evaluate=False) == (x < b) assert Ge(x, b, evaluate=False) == (x >= b) assert Le(x, b, evaluate=False) == (x <= b) for b in (y, S(0), S(1)/3, pi, oo, -oo): assert Gt(b, x, evaluate=False) == (b > x) assert Lt(b, x, evaluate=False) == (b < x) assert Ge(b, x, evaluate=False) == (b >= x) assert Le(b, x, evaluate=False) == (b <= x) def test_inequalities_symbol_name_same_complex(): """Using the operator and functional forms should give same results. With complex non-real numbers, both should raise errors. """ # FIXME: could replace with random selection after test passes for a in (x, S(0), S(1)/3, pi, oo): raises(TypeError, lambda: Gt(a, I)) raises(TypeError, lambda: a > I) raises(TypeError, lambda: Lt(a, I)) raises(TypeError, lambda: a < I) raises(TypeError, lambda: Ge(a, I)) raises(TypeError, lambda: a >= I) raises(TypeError, lambda: Le(a, I)) raises(TypeError, lambda: a <= I) def test_inequalities_cant_sympify_other(): # see issue 7833 from operator import gt, lt, ge, le bar = "foo" for a in (x, S(0), S(1)/3, pi, I, zoo, oo, -oo, nan): for op in (lt, gt, le, ge): raises(TypeError, lambda: op(a, bar)) def test_ineq_avoid_wild_symbol_flip(): # see issue #7951, we try to avoid this internally, e.g., by using # __lt__ instead of "<". 
from sympy.core.symbol import Wild p = symbols('p', cls=Wild) # x > p might flip, but Gt should not: assert Gt(x, p) == Gt(x, p, evaluate=False) # Previously failed as 'p > x': e = Lt(x, y).subs({y: p}) assert e == Lt(x, p, evaluate=False) # Previously failed as 'p <= x': e = Ge(x, p).doit() assert e == Ge(x, p, evaluate=False) def test_issue_8245(): a = S("6506833320952669167898688709329/5070602400912917605986812821504") q = a.n(10) assert (a == q) is True assert (a != q) is False assert (a > q) == False assert (a < q) == False assert (a >= q) == True assert (a <= q) == True a = sqrt(2) r = Rational(str(a.n(30))) assert (r == a) is False assert (r != a) is True assert (r > a) == True assert (r < a) == False assert (r >= a) == True assert (r <= a) == False a = sqrt(2) r = Rational(str(a.n(29))) assert (r == a) is False assert (r != a) is True assert (r > a) == False assert (r < a) == True assert (r >= a) == False assert (r <= a) == True def test_issue_8449(): p = Symbol('p', nonnegative=True) assert Lt(-oo, p) assert Ge(-oo, p) is S.false assert Gt(oo, -p) assert Le(oo, -p) is S.false def test_simplify(): assert simplify(x*(y + 1) - x*y - x + 1 < x) == (x > 1) assert simplify(S(1) < -x) == (x < -1) def test_equals(): w, x, y, z = symbols('w:z') f = Function('f') assert Eq(x, 1).equals(Eq(x*(y + 1) - x*y - x + 1, x)) assert Eq(x, y).equals(x < y, True) == False assert Eq(x, f(1)).equals(Eq(x, f(2)), True) == f(1) - f(2) assert Eq(f(1), y).equals(Eq(f(2), y), True) == f(1) - f(2) assert Eq(x, f(1)).equals(Eq(f(2), x), True) == f(1) - f(2) assert Eq(f(1), x).equals(Eq(x, f(2)), True) == f(1) - f(2) assert Eq(w, x).equals(Eq(y, z), True) == False assert Eq(f(1), f(2)).equals(Eq(f(3), f(4)), True) == f(1) - f(3) assert (x < y).equals(y > x, True) == True assert (x < y).equals(y >= x, True) == False assert (x < y).equals(z < y, True) == False assert (x < y).equals(x < z, True) == False assert (x < f(1)).equals(x < f(2), True) == f(1) - f(2) assert (f(1) < x).equals(f(2) < x, True) == f(1) - f(2) def test_reversed(): assert (x < y).reversed == (y > x) assert (x <= y).reversed == (y >= x) assert Eq(x, y, evaluate=False).reversed == Eq(y, x, evaluate=False) assert Ne(x, y, evaluate=False).reversed == Ne(y, x, evaluate=False) assert (x >= y).reversed == (y <= x) assert (x > y).reversed == (y < x) def test_canonical(): one = S(1) def unchanged(v): c = v.canonical return v.is_Relational and c.is_Relational and v == c def isreversed(v): return v.canonical == v.reversed assert unchanged(x < one) assert unchanged(x <= one) assert isreversed(Eq(one, x, evaluate=False)) assert unchanged(Eq(x, one, evaluate=False)) assert isreversed(Ne(one, x, evaluate=False)) assert unchanged(Ne(x, one, evaluate=False)) assert unchanged(x >= one) assert unchanged(x > one) assert unchanged(x < y) assert unchanged(x <= y) assert isreversed(Eq(y, x, evaluate=False)) assert unchanged(Eq(x, y, evaluate=False)) assert isreversed(Ne(y, x, evaluate=False)) assert unchanged(Ne(x, y, evaluate=False)) assert isreversed(x >= y) assert isreversed(x > y) assert (-x < 1).canonical == (x > -1) assert isreversed(-x > y) @XFAIL def test_issue_8444(): x = symbols('x', real=True) assert (x <= oo) == (x >= -oo) == True x = symbols('x') assert x >= floor(x) assert (x < floor(x)) == False assert Gt(x, floor(x)) == Gt(x, floor(x), evaluate=False) assert Ge(x, floor(x)) == Ge(x, floor(x), evaluate=False) assert x <= ceiling(x) assert (x > ceiling(x)) == False assert Lt(x, ceiling(x)) == Lt(x, ceiling(x), evaluate=False) assert Le(x, ceiling(x)) == 
Le(x, ceiling(x), evaluate=False) i = symbols('i', integer=True) assert (i > floor(i)) == False assert (i < ceiling(i)) == False
[ [ [ 35, 40 ], [ 9957, 9962 ], [ 20506, 20511 ] ], [ [ 42, 48 ], [ 3563, 3569 ], [ 3603, 3609 ], [ 3643, 3649 ], [ 3683, 3689 ], [ 8421, 8427 ], [ 8571, 8577 ], [ 8614, 8620 ], [ 8658, 8664 ], [ 8701, 8707 ], [ 8745, 8751 ], [ 8791, 8797 ], [ 10970, 10976 ], [ 11007, 11013 ], [ 11045, 11051 ], [ 11082, 11088 ], [ 11120, 11126 ], [ 11157, 11163 ], [ 11195, 11201 ], [ 11232, 11238 ], [ 12600, 12606 ], [ 13293, 13299 ], [ 13388, 13394 ], [ 13567, 13573 ], [ 13720, 13726 ], [ 13757, 13763 ], [ 14822, 14828 ], [ 14861, 14867 ], [ 14901, 14907 ], [ 14940, 14946 ], [ 16254, 16260 ], [ 16298, 16304 ], [ 16339, 16345 ], [ 16383, 16389 ], [ 16424, 16430 ], [ 16468, 16474 ], [ 16510, 16516 ], [ 16554, 16560 ], [ 16819, 16825 ] ], [ [ 68, 69 ], [ 1399, 1400 ], [ 1433, 1434 ], [ 2126, 2127 ], [ 2297, 2298 ], [ 2330, 2331 ], [ 2360, 2361 ], [ 2391, 2392 ], [ 2424, 2425 ], [ 2455, 2456 ], [ 2488, 2489 ], [ 2521, 2522 ], [ 2552, 2553 ], [ 2584, 2585 ], [ 2617, 2618 ], [ 2649, 2650 ], [ 2682, 2683 ], [ 2716, 2717 ], [ 2748, 2749 ], [ 2781, 2782 ], [ 2814, 2815 ], [ 2846, 2847 ], [ 2879, 2880 ], [ 2914, 2915 ], [ 2946, 2947 ], [ 2980, 2981 ], [ 3014, 3015 ], [ 3046, 3047 ], [ 3095, 3096 ], [ 3125, 3126 ], [ 3156, 3157 ], [ 3187, 3188 ], [ 3217, 3218 ], [ 3247, 3248 ], [ 3278, 3279 ], [ 3308, 3309 ], [ 3339, 3340 ], [ 3369, 3370 ], [ 3399, 3400 ], [ 3430, 3431 ], [ 3460, 3461 ], [ 3491, 3492 ], [ 3521, 3522 ], [ 3552, 3553 ], [ 3841, 3842 ], [ 4227, 4228 ], [ 4303, 4304 ], [ 4340, 4341 ], [ 4378, 4379 ], [ 4416, 4417 ], [ 4495, 4496 ], [ 10594, 10595 ], [ 10702, 10703 ], [ 13905, 13906 ], [ 13948, 13949 ], [ 13990, 13991 ], [ 13996, 13997 ], [ 14063, 14064 ], [ 14117, 14118 ], [ 14173, 14174 ], [ 14228, 14229 ], [ 14416, 14417 ], [ 14422, 14423 ], [ 14429, 14430 ], [ 14441, 14442 ], [ 15220, 15221 ], [ 15226, 15227 ], [ 15459, 15460 ], [ 15465, 15466 ], [ 15711, 15712 ], [ 15717, 15718 ], [ 16223, 16224 ], [ 16229, 16230 ], [ 16730, 16731 ], [ 16736, 16737 ], [ 17402, 17403 ], [ 18199, 18200 ], [ 18254, 18255 ], [ 18365, 18366 ], [ 19624, 19625 ] ], [ [ 71, 77 ], [ 2076, 2082 ], [ 4580, 4586 ], [ 11991, 11997 ], [ 12027, 12033 ], [ 12190, 12196 ], [ 12210, 12216 ], [ 12241, 12247 ], [ 12437, 12443 ], [ 12667, 12673 ], [ 12712, 12718 ], [ 12748, 12754 ], [ 13471, 13477 ], [ 14696, 14702 ], [ 18122, 18128 ] ], [ [ 79, 86 ], [ 565, 572 ], [ 17046, 17053 ], [ 18426, 18433 ], [ 20543, 20550 ], [ 20619, 20626 ], [ 21020, 21027 ] ], [ [ 88, 91 ], [ 13892, 13895 ], [ 13897, 13900 ], [ 13935, 13938 ], [ 13940, 13943 ], [ 14037, 14040 ], [ 14109, 14112 ], [ 14147, 14150 ], [ 14220, 14223 ], [ 14458, 14461 ], [ 14507, 14510 ], [ 14769, 14772 ], [ 16765, 16768 ] ], [ [ 93, 95 ], [ 2285, 2287 ], [ 2290, 2292 ], [ 2317, 2319 ], [ 2323, 2325 ], [ 2349, 2351 ], [ 2379, 2381 ], [ 2384, 2386 ], [ 2411, 2413 ], [ 2417, 2419 ], [ 2444, 2446 ], [ 2475, 2477 ], [ 2481, 2483 ], [ 2507, 2509 ], [ 2514, 2516 ], [ 2540, 2542 ], [ 2571, 2573 ], [ 2577, 2579 ], [ 2603, 2605 ], [ 2610, 2612 ], [ 2637, 2639 ], [ 2670, 2672 ], [ 2675, 2677 ], [ 2703, 2705 ], [ 2709, 2711 ], [ 2737, 2739 ], [ 2769, 2771 ], [ 2774, 2776 ], [ 2801, 2803 ], [ 2807, 2809 ], [ 2835, 2837 ], [ 2866, 2868 ], [ 2872, 2874 ], [ 2900, 2902 ], [ 2907, 2909 ], [ 2934, 2936 ], [ 2967, 2969 ], [ 2973, 2975 ], [ 3000, 3002 ], [ 3007, 3009 ], [ 3034, 3036 ], [ 9563, 9565 ], [ 9623, 9625 ], [ 9668, 9670 ], [ 9729, 9731 ], [ 9823, 9825 ], [ 9866, 9868 ], [ 9928, 9930 ], [ 9950, 9952 ], [ 10052, 10054 ], [ 10068, 10070 ], [ 10094, 10096 ], [ 10111, 10113 ], [ 12501, 12503 
], [ 12545, 12547 ], [ 14008, 14010 ], [ 14013, 14015 ], [ 14449, 14451 ], [ 14454, 14456 ], [ 15238, 15240 ], [ 15243, 15245 ], [ 15477, 15479 ], [ 15482, 15484 ], [ 15729, 15731 ], [ 15734, 15736 ], [ 16241, 16243 ], [ 16756, 16758 ], [ 16761, 16763 ], [ 18167, 18169 ], [ 18189, 18191 ], [ 18221, 18223 ], [ 18243, 18245 ], [ 20584, 20586 ], [ 20598, 20600 ] ], [ [ 97, 98 ], [ 3512, 3513 ], [ 3543, 3544 ], [ 11903, 11904 ], [ 12810, 12811 ], [ 12825, 12826 ], [ 12926, 12927 ], [ 13019, 13020 ], [ 13269, 13270 ], [ 13283, 13284 ], [ 13694, 13695 ], [ 13710, 13711 ], [ 14688, 14689 ], [ 16748, 16749 ], [ 3592, 3593 ], [ 3632, 3633 ], [ 3672, 3673 ], [ 3712, 3713 ], [ 12628, 12629 ], [ 12634, 12635 ], [ 16286, 16287 ], [ 16328, 16329 ], [ 16371, 16372 ], [ 16413, 16414 ], [ 16456, 16457 ], [ 16499, 16500 ], [ 16542, 16543 ], [ 16585, 16586 ] ], [ [ 100, 102 ], [ 3822, 3824 ], [ 3830, 3832 ], [ 14004, 14006 ], [ 14437, 14439 ], [ 15234, 15236 ], [ 15473, 15475 ], [ 15725, 15727 ], [ 16237, 16239 ], [ 16744, 16746 ] ], [ [ 104, 109 ], [ 3727, 3732 ], [ 3771, 3776 ] ], [ [ 111, 114 ], [ 8925, 8928 ], [ 9228, 9231 ] ], [ [ 116, 118 ], [ 8975, 8977 ], [ 9274, 9276 ], [ 9467, 9469 ] ], [ [ 120, 123 ], [ 9015, 9018 ], [ 10147, 10150 ], [ 10198, 10201 ], [ 10249, 10252 ], [ 10307, 10310 ], [ 10365, 10368 ], [ 10423, 10426 ] ], [ [ 144, 151 ], [ 9059, 9066 ], [ 9114, 9121 ], [ 9365, 9372 ], [ 9416, 9423 ] ], [ [ 153, 156 ], [ 9168, 9171 ], [ 9471, 9474 ] ], [ [ 158, 161 ], [ 14691, 14694 ], [ 16751, 16754 ], [ 14848, 14851 ], [ 14887, 14890 ], [ 14927, 14930 ], [ 14966, 14969 ] ], [ [ 163, 167 ], [ 17665, 17669 ], [ 17881, 17885 ] ], [ [ 169, 177 ], [ 17681, 17689 ], [ 17897, 17905 ] ], [ [ 179, 187 ], [ 18296, 18304 ], [ 18356, 18364 ] ], [ [ 189, 197 ], [ 18449, 18457 ] ], [ [ 236, 241 ], [ 7984, 7989 ] ], [ [ 277, 287 ], [ 616, 626 ], [ 682, 692 ], [ 815, 825 ], [ 951, 961 ], [ 1084, 1094 ], [ 1225, 1235 ], [ 1492, 1502 ], [ 1601, 1611 ], [ 1668, 1678 ], [ 1736, 1746 ], [ 1803, 1813 ], [ 1871, 1881 ], [ 4613, 4623 ], [ 4676, 4686 ], [ 4719, 4729 ], [ 4801, 4811 ], [ 4869, 4879 ], [ 4917, 4927 ], [ 5001, 5011 ], [ 5064, 5074 ], [ 5107, 5117 ], [ 5189, 5199 ], [ 5256, 5266 ], [ 5303, 5313 ], [ 5389, 5399 ], [ 5435, 5445 ], [ 5481, 5491 ], [ 5567, 5577 ], [ 5613, 5623 ], [ 5659, 5669 ], [ 5746, 5756 ], [ 5792, 5802 ], [ 5879, 5889 ], [ 5925, 5935 ], [ 6012, 6022 ], [ 6058, 6068 ], [ 6145, 6155 ], [ 6191, 6201 ], [ 6279, 6289 ], [ 6325, 6335 ], [ 6409, 6419 ], [ 6455, 6465 ], [ 6539, 6549 ], [ 6585, 6595 ], [ 6669, 6679 ], [ 6715, 6725 ], [ 6800, 6810 ], [ 6845, 6855 ], [ 6938, 6948 ], [ 6983, 6993 ], [ 7075, 7085 ], [ 7119, 7129 ], [ 7210, 7220 ], [ 7254, 7264 ], [ 7347, 7357 ], [ 7392, 7402 ], [ 7482, 7492 ], [ 7527, 7537 ], [ 7616, 7626 ], [ 7660, 7670 ], [ 7748, 7758 ], [ 7792, 7802 ], [ 8448, 8458 ] ], [ [ 289, 297 ], [ 752, 760 ], [ 4762, 4770 ], [ 4965, 4973 ], [ 5150, 5158 ], [ 5350, 5358 ], [ 10151, 10159 ], [ 10223, 10231 ], [ 13883, 13891 ], [ 14028, 14036 ], [ 14082, 14090 ] ], [ [ 299, 309 ], [ 5527, 5537 ], [ 5705, 5715 ], [ 10170, 10180 ], [ 10202, 10212 ], [ 13924, 13934 ], [ 14136, 14146 ], [ 14191, 14201 ] ], [ [ 346, 357 ], [ 885, 896 ], [ 5838, 5849 ], [ 5971, 5982 ], [ 6104, 6115 ], [ 6237, 6248 ], [ 9310, 9321 ], [ 10336, 10347 ], [ 10369, 10380 ] ], [ [ 359, 367 ], [ 1021, 1029 ], [ 6371, 6379 ], [ 6501, 6509 ], [ 6631, 6639 ], [ 6761, 6769 ], [ 10281, 10289 ], [ 10427, 10435 ] ], [ [ 369, 386 ], [ 1153, 1170 ], [ 6891, 6908 ], [ 7029, 7046 ], [ 7164, 7181 ], [ 7299, 7316 ], [ 
10253, 10270 ], [ 10446, 10463 ] ], [ [ 423, 437 ], [ 1294, 1308 ], [ 7438, 7452 ], [ 7573, 7587 ], [ 7705, 7719 ], [ 7837, 7851 ], [ 10311, 10325 ], [ 10391, 10405 ] ], [ [ 439, 442 ], [ 1526, 1529 ] ], [ [ 444, 446 ], [ 1363, 1365 ], [ 1567, 1569 ], [ 1956, 1958 ], [ 1968, 1970 ], [ 1991, 1993 ], [ 2003, 2005 ], [ 2027, 2029 ], [ 2114, 2116 ], [ 3083, 3085 ], [ 3113, 3115 ], [ 3509, 3511 ], [ 3819, 3821 ], [ 4515, 4517 ], [ 4534, 4536 ], [ 4604, 4606 ], [ 4667, 4669 ], [ 4710, 4712 ], [ 4753, 4755 ], [ 4788, 4790 ], [ 4856, 4858 ], [ 4904, 4906 ], [ 4952, 4954 ], [ 4992, 4994 ], [ 5055, 5057 ], [ 5098, 5100 ], [ 5141, 5143 ], [ 5176, 5178 ], [ 5243, 5245 ], [ 5290, 5292 ], [ 5337, 5339 ], [ 9747, 9749 ], [ 10508, 10510 ], [ 10559, 10561 ], [ 18474, 18476 ], [ 18490, 18492 ], [ 18533, 18535 ], [ 18582, 18584 ], [ 18601, 18603 ], [ 18646, 18648 ], [ 18665, 18667 ], [ 18710, 18712 ], [ 18729, 18731 ], [ 18774, 18776 ], [ 18793, 18795 ], [ 18838, 18840 ], [ 18854, 18856 ], [ 18890, 18892 ], [ 18912, 18914 ], [ 19375, 19377 ], [ 19412, 19414 ], [ 19887, 19889 ], [ 19936, 19938 ], [ 20204, 20206 ], [ 20251, 20253 ], [ 8776, 8778 ] ], [ [ 448, 450 ], [ 1634, 1636 ], [ 3205, 3207 ], [ 3235, 3237 ], [ 3894, 3896 ], [ 4359, 4361 ], [ 4475, 4477 ], [ 7335, 7337 ], [ 7380, 7382 ], [ 7426, 7428 ], [ 7470, 7472 ], [ 7515, 7517 ], [ 7561, 7563 ], [ 10884, 10886 ], [ 13359, 13361 ], [ 13538, 13540 ], [ 15341, 15343 ], [ 15553, 15555 ], [ 15805, 15807 ], [ 17204, 17206 ], [ 17242, 17244 ], [ 18163, 18165 ], [ 20891, 20893 ], [ 20912, 20914 ], [ 3669, 3671 ], [ 16365, 16367 ] ], [ [ 452, 454 ], [ 1702, 1704 ], [ 3266, 3268 ], [ 3296, 3298 ], [ 3327, 3329 ], [ 3926, 3928 ], [ 4321, 4323 ], [ 4396, 4398 ], [ 6267, 6269 ], [ 6313, 6315 ], [ 6359, 6361 ], [ 6397, 6399 ], [ 6443, 6445 ], [ 6489, 6491 ], [ 10779, 10781 ], [ 15420, 15422 ], [ 15656, 15658 ], [ 15908, 15910 ], [ 18240, 18242 ], [ 20957, 20959 ], [ 20978, 20980 ], [ 3709, 3711 ], [ 16536, 16538 ] ], [ [ 491, 493 ], [ 1769, 1771 ], [ 3357, 3359 ], [ 3387, 3389 ], [ 3957, 3959 ], [ 4208, 4210 ], [ 4245, 4247 ], [ 4264, 4266 ], [ 4434, 4436 ], [ 4454, 4456 ], [ 6788, 6790 ], [ 6833, 6835 ], [ 6879, 6881 ], [ 6926, 6928 ], [ 6971, 6973 ], [ 7017, 7019 ], [ 10832, 10834 ], [ 15302, 15304 ], [ 15502, 15504 ], [ 15754, 15756 ], [ 17123, 17125 ], [ 17135, 17137 ], [ 18218, 18220 ], [ 20703, 20705 ], [ 20722, 20724 ], [ 3589, 3591 ], [ 16280, 16282 ] ], [ [ 495, 497 ], [ 1837, 1839 ], [ 3418, 3420 ], [ 3448, 3450 ], [ 3479, 3481 ], [ 3989, 3991 ], [ 4284, 4286 ], [ 5734, 5736 ], [ 5780, 5782 ], [ 5826, 5828 ], [ 5867, 5869 ], [ 5913, 5915 ], [ 5959, 5961 ], [ 10726, 10728 ], [ 15380, 15382 ], [ 15604, 15606 ], [ 15856, 15858 ], [ 17312, 17314 ], [ 17344, 17346 ], [ 18185, 18187 ], [ 20765, 20767 ], [ 20784, 20786 ], [ 3629, 3631 ], [ 16450, 16452 ] ], [ [ 499, 501 ], [ 642, 644 ], [ 1905, 1907 ], [ 3144, 3146 ], [ 3175, 3177 ], [ 3540, 3542 ], [ 5377, 5379 ], [ 5423, 5425 ], [ 5469, 5471 ], [ 5515, 5517 ], [ 5555, 5557 ], [ 5601, 5603 ], [ 5647, 5649 ], [ 5693, 5695 ], [ 9792, 9794 ], [ 10616, 10618 ], [ 10667, 10669 ], [ 19448, 19450 ], [ 19485, 19487 ], [ 19986, 19988 ], [ 20035, 20037 ], [ 20299, 20301 ], [ 20346, 20348 ], [ 8822, 8824 ] ], [ [ 531, 539 ], [ 9551, 9559 ], [ 9611, 9619 ], [ 9658, 9666 ], [ 9719, 9727 ], [ 9813, 9821 ], [ 9854, 9862 ], [ 9918, 9926 ], [ 9938, 9946 ], [ 10040, 10048 ], [ 10056, 10064 ], [ 10084, 10092 ], [ 10101, 10109 ] ], [ [ 541, 550 ], [ 9768, 9777 ] ], [ [ 552, 553 ], [ 627, 628 ], [ 645, 646 ], [ 693, 694 ], [ 720, 
721 ], [ 826, 827 ], [ 853, 854 ], [ 962, 963 ], [ 989, 990 ], [ 1095, 1096 ], [ 1121, 1122 ], [ 1236, 1237 ], [ 1262, 1263 ], [ 1366, 1367 ], [ 1390, 1391 ], [ 1424, 1425 ], [ 1472, 1473 ], [ 1476, 1477 ], [ 1533, 1534 ], [ 1537, 1538 ], [ 1573, 1574 ], [ 1577, 1578 ], [ 1640, 1641 ], [ 1644, 1645 ], [ 1708, 1709 ], [ 1712, 1713 ], [ 1775, 1776 ], [ 1779, 1780 ], [ 1843, 1844 ], [ 1847, 1848 ], [ 1911, 1912 ], [ 1915, 1916 ], [ 1959, 1960 ], [ 1971, 1972 ], [ 1994, 1995 ], [ 2006, 2007 ], [ 2030, 2031 ], [ 2033, 2034 ], [ 3884, 3885 ], [ 3897, 3898 ], [ 3915, 3916 ], [ 3929, 3930 ], [ 3947, 3948 ], [ 3960, 3961 ], [ 3978, 3979 ], [ 3992, 3993 ], [ 4518, 4519 ], [ 4537, 4538 ], [ 8905, 8906 ], [ 8929, 8930 ], [ 8955, 8956 ], [ 8978, 8979 ], [ 9005, 9006 ], [ 9019, 9020 ], [ 9038, 9039 ], [ 9067, 9068 ], [ 9093, 9094 ], [ 9129, 9130 ], [ 9148, 9149 ], [ 9172, 9173 ], [ 9210, 9211 ], [ 9256, 9257 ], [ 9302, 9303 ], [ 9346, 9347 ], [ 9397, 9398 ], [ 9448, 9449 ], [ 9532, 9533 ], [ 9591, 9592 ], [ 9639, 9640 ], [ 9699, 9700 ], [ 9750, 9751 ], [ 9795, 9796 ], [ 9895, 9896 ], [ 10018, 10019 ], [ 10160, 10161 ], [ 10181, 10182 ], [ 10213, 10214 ], [ 10232, 10233 ], [ 10271, 10272 ], [ 10290, 10291 ], [ 10326, 10327 ], [ 10348, 10349 ], [ 10381, 10382 ], [ 10406, 10407 ], [ 10436, 10437 ], [ 10464, 10465 ], [ 10511, 10512 ], [ 10514, 10515 ], [ 10562, 10563 ], [ 10565, 10566 ], [ 10619, 10620 ], [ 10622, 10623 ], [ 10670, 10671 ], [ 10673, 10674 ], [ 10729, 10730 ], [ 10732, 10733 ], [ 10782, 10783 ], [ 10785, 10786 ], [ 10835, 10836 ], [ 10838, 10839 ], [ 10887, 10888 ], [ 10890, 10891 ], [ 11906, 11907 ], [ 13987, 13988 ], [ 14413, 14414 ], [ 15214, 15215 ], [ 15505, 15506 ], [ 15531, 15532 ], [ 15556, 15557 ], [ 15582, 15583 ], [ 15607, 15608 ], [ 15633, 15634 ], [ 15659, 15660 ], [ 15685, 15686 ], [ 15760, 15761 ], [ 15787, 15788 ], [ 15811, 15812 ], [ 15838, 15839 ], [ 15862, 15863 ], [ 15890, 15891 ], [ 15914, 15915 ], [ 15942, 15943 ], [ 16220, 16221 ], [ 16727, 16728 ], [ 17126, 17127 ], [ 17138, 17139 ], [ 17207, 17208 ], [ 17245, 17246 ], [ 17315, 17316 ], [ 17347, 17348 ], [ 18305, 18306 ], [ 18317, 18318 ], [ 18323, 18324 ], [ 18331, 18332 ], [ 18338, 18339 ], [ 18373, 18374 ], [ 18380, 18381 ], [ 19296, 19297 ], [ 19320, 19321 ], [ 19335, 19336 ], [ 19361, 19362 ], [ 19378, 19379 ], [ 19418, 19419 ], [ 19451, 19452 ], [ 19491, 19492 ], [ 19522, 19523 ], [ 19548, 19549 ], [ 19563, 19564 ], [ 19587, 19588 ], [ 19825, 19826 ], [ 19855, 19856 ], [ 19895, 19896 ], [ 19939, 19940 ], [ 19994, 19995 ], [ 20038, 20039 ], [ 20084, 20085 ], [ 20115, 20116 ], [ 20146, 20147 ], [ 20174, 20175 ], [ 20210, 20211 ], [ 20254, 20255 ], [ 20305, 20306 ], [ 20349, 20350 ], [ 20394, 20395 ], [ 20424, 20425 ], [ 20444, 20445 ], [ 20465, 20466 ], [ 20496, 20497 ], [ 8602, 8603 ], [ 8645, 8646 ], [ 8689, 8690 ], [ 8732, 8733 ], [ 8779, 8780 ], [ 8825, 8826 ] ], [ [ 555, 556 ], [ 630, 631 ], [ 648, 649 ], [ 696, 697 ], [ 804, 805 ], [ 829, 830 ], [ 940, 941 ], [ 965, 966 ], [ 1073, 1074 ], [ 1098, 1099 ], [ 1214, 1215 ], [ 1239, 1240 ], [ 1352, 1353 ], [ 1503, 1504 ], [ 1530, 1531 ], [ 1570, 1571 ], [ 1612, 1613 ], [ 1637, 1638 ], [ 1679, 1680 ], [ 1705, 1706 ], [ 1747, 1748 ], [ 1772, 1773 ], [ 1814, 1815 ], [ 1840, 1841 ], [ 1882, 1883 ], [ 1908, 1909 ], [ 3888, 3889 ], [ 3900, 3901 ], [ 3920, 3921 ], [ 3932, 3933 ], [ 3951, 3952 ], [ 3963, 3964 ], [ 3983, 3984 ], [ 3995, 3996 ], [ 8909, 8910 ], [ 8933, 8934 ], [ 8959, 8960 ], [ 8982, 8983 ], [ 9009, 9010 ], [ 9023, 9024 ], [ 9042, 9043 ], [ 9071, 9072 ], [ 
9097, 9098 ], [ 9133, 9134 ], [ 9152, 9153 ], [ 9176, 9177 ], [ 9214, 9215 ], [ 9260, 9261 ], [ 9306, 9307 ], [ 9350, 9351 ], [ 9401, 9402 ], [ 9452, 9453 ], [ 10020, 10021 ], [ 10163, 10164 ], [ 10184, 10185 ], [ 10216, 10217 ], [ 10235, 10236 ], [ 10274, 10275 ], [ 10293, 10294 ], [ 10329, 10330 ], [ 10351, 10352 ], [ 10384, 10385 ], [ 10409, 10410 ], [ 10439, 10440 ], [ 10467, 10468 ], [ 15217, 15218 ], [ 15456, 15457 ], [ 15708, 15709 ], [ 17210, 17211 ], [ 17219, 17220 ], [ 18308, 18309 ], [ 18319, 18320 ], [ 19300, 19301 ], [ 19316, 19317 ], [ 19340, 19341 ], [ 19356, 19357 ], [ 19381, 19382 ], [ 19415, 19416 ], [ 19454, 19455 ], [ 19488, 19489 ], [ 19527, 19528 ], [ 19543, 19544 ], [ 19567, 19568 ], [ 19583, 19584 ], [ 20150, 20151 ], [ 20179, 20180 ], [ 20207, 20208 ], [ 20257, 20258 ], [ 20302, 20303 ], [ 20352, 20353 ], [ 20399, 20400 ], [ 20428, 20429 ], [ 20500, 20501 ] ], [ [ 558, 559 ], [ 723, 724 ], [ 782, 783 ], [ 856, 857 ], [ 918, 919 ], [ 992, 993 ], [ 1051, 1052 ], [ 1124, 1125 ], [ 1192, 1193 ], [ 1265, 1266 ], [ 1330, 1331 ], [ 8915, 8916 ], [ 8936, 8937 ], [ 8965, 8966 ], [ 8985, 8986 ], [ 9049, 9050 ], [ 9074, 9075 ], [ 9104, 9105 ], [ 9122, 9123 ], [ 9158, 9159 ], [ 9179, 9180 ], [ 9220, 9221 ], [ 9266, 9267 ], [ 9357, 9358 ], [ 9408, 9409 ], [ 9458, 9459 ] ], [ [ 561, 562 ], [ 8919, 8920 ], [ 8940, 8941 ], [ 8969, 8970 ], [ 8989, 8990 ], [ 9053, 9054 ], [ 9078, 9079 ], [ 9108, 9109 ], [ 9126, 9127 ], [ 9162, 9163 ], [ 9183, 9184 ], [ 9224, 9225 ], [ 9270, 9271 ], [ 9361, 9362 ], [ 9412, 9413 ], [ 9462, 9463 ] ], [ [ 590, 601 ] ], [ [ 657, 670 ] ], [ [ 1447, 1460 ] ], [ [ 1934, 1941 ] ], [ [ 2140, 2157 ] ], [ [ 3059, 3068 ] ], [ [ 3855, 3868 ] ], [ [ 4004, 4013 ] ], [ [ 4549, 4568 ] ], [ [ 8487, 8514 ] ], [ [ 8839, 8868 ] ], [ [ 9483, 9516 ] ], [ [ 9967, 10002 ] ], [ [ 10124, 10132 ] ], [ [ 10476, 10489 ] ], [ [ 10927, 10958 ], [ 11871, 11902 ], [ 12094, 12125 ], [ 12469, 12500 ], [ 12512, 12543 ], [ 12778, 12809 ], [ 12878, 12909 ], [ 14472, 14503 ], [ 14734, 14765 ] ], [ [ 11272, 11309 ], [ 12299, 12336 ], [ 12966, 13003 ], [ 13062, 13099 ] ], [ [ 11802, 11841 ] ], [ [ 11915, 11944 ] ], [ [ 12349, 12396 ] ], [ [ 12558, 12592 ] ], [ [ 13112, 13145 ] ], [ [ 13800, 13828 ] ], [ [ 14241, 14273 ] ], [ [ 14518, 14547 ] ], [ [ 14780, 14814 ] ], [ [ 14982, 15016 ] ], [ [ 15951, 15993 ] ], [ [ 16594, 16630 ] ], [ [ 16863, 16895 ] ], [ [ 17375, 17390 ] ], [ [ 18095, 18110 ] ], [ [ 18268, 18281 ] ], [ [ 18394, 18405 ] ], [ [ 19267, 19280 ] ], [ [ 19596, 19610 ] ], [ [ 20516, 20531 ] ] ]
from flask import Flask, Response
from camera import Camera
import cv2

app = Flask(__name__)
camera = Camera().start()


def gen(camera):
    while True:
        frame = camera.read()
        _, jpeg = cv2.imencode('.jpg', frame)
        yield (b'--frame\r\n'
               b'Content-Type: image/jpeg\r\n\r\n' + jpeg.tobytes() + b'\r\n')


@app.route('/stream')
def stream():
    return Response(gen(camera),
                    mimetype='multipart/x-mixed-replace; boundary=frame')


if __name__ == '__main__':
    app.run(host='0.0.0.0', debug=False, use_reloader=False)
[ [ [ 18, 23 ], [ 79, 84 ] ], [ [ 25, 33 ], [ 397, 405 ] ], [ [ 53, 59 ], [ 104, 110 ] ], [ [ 67, 70 ], [ 204, 207 ] ], [ [ 73, 76 ], [ 351, 354 ], [ 522, 525 ] ], [ [ 95, 101 ], [ 410, 416 ] ], [ [ 127, 130 ], [ 406, 409 ] ], [ [ 376, 382 ] ] ]
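The `camera` module imported by the streaming app above is not included here. A minimal stand-in that satisfies the `Camera().start()` and `camera.read()` calls it relies on might look like this (OpenCV's `VideoCapture` on device 0 is an assumption):

# camera.py -- hypothetical minimal implementation of the interface used above.
import cv2


class Camera:
    def __init__(self, device=0):
        self.cap = cv2.VideoCapture(device)

    def start(self):
        # Nothing asynchronous here; returning self keeps Camera().start() working.
        return self

    def read(self):
        ok, frame = self.cap.read()
        if not ok:
            raise RuntimeError('could not read frame from capture device')
        return frame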
from glob import glob
from setuptools import setup
from pybind11.setup_helpers import Pybind11Extension

ext_modules = [
    Pybind11Extension(
        "PFlib",
        # sorted(glob("*.cpp")),  # Sort source files for reproducibility
        ["particle_filter.cpp",],
        swig_opts=['-ggdb',],
        include_dirs=['include',]
    ),
]

setup(
    name="PFlib",
    # extra_compile_args=['-O0','-Wall','-g'],
    # extra_compile_args=['-Iinclude'],
    ext_modules=ext_modules,
)
[ [ [ 17, 21 ] ], [ [ 45, 50 ], [ 343, 348 ] ], [ [ 86, 103 ], [ 125, 142 ] ], [ [ 105, 116 ], [ 471, 482 ] ] ]
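Once the extension above is built (for example with `pip install .` or `python setup.py build_ext --inplace`), the compiled module is imported like any other; what `PFlib` actually exports depends on the bindings in `particle_filter.cpp`, so the attribute access below is only a placeholder:

import PFlib               # available after the pybind11 extension is compiled

print(PFlib.__doc__)      # module docstring registered by the bindings, if any
print(dir(PFlib))         # names exported from particle_filter.cpp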
from sklearn import svm, cluster from PIL import Image, ImageDraw import os import sys import random def load_images(dirname): images = [] for image_name in os.listdir(dirname): if image_name.startswith('.'): continue image = Image.open(dirname + '/' + image_name).convert('1') x, y = image.size image = image.resize((x, 280), Image.ANTIALIAS) data = [0 if pixel == 0 else 1 for pixel in image.getdata()] images.append(data) return images min_len = 10000000 def normalize(X): global min_len min_len = min(min_len, min(len(x) for x in X)) return [x[:min_len] for x in X] def crossvalidate(edges, nonedges): random.shuffle(edges) random.shuffle(nonedges) train_edge_len, train_nonedge_len = len(edges) * 7 // 10, len(nonedges) * 7 // 10 cross_edge_len, cross_nonedge_len = len(edges) - train_edge_len, len(nonedges) - train_nonedge_len X_train = normalize(nonedges[:train_nonedge_len] + edges[:train_edge_len]) y_train = [0] * train_nonedge_len + [1] * train_edge_len X_cross = normalize(nonedges[train_nonedge_len:] + edges[train_edge_len:]) y_cross = [0] * cross_nonedge_len + [1] * cross_edge_len clf = svm.SVC(gamma=.001, C=100.) clf.fit(X_train, y_train) print("prediction: {}".format(list(clf.predict(X_cross)))) print("actuallity: {}".format(y_cross)) print(clf.score(X_cross, y_cross)) def get_column(img, i): w, h = img.size column = [] for j in range(h): column.append(0 if img.getpixel((i, j)) == 0 else 1) return column def search_picture(clf, image_name): image = Image.open(image_name).convert('1') x, y = image.size image = image.resize((x, 280), Image.ANTIALIAS) w, h = image.size columns = [get_column(image, i) for i in range(25)] datas = [] for i in range(25, w): columns = columns[1:] + [get_column(image, i)] data = [columns[i][j] for j in range(len(columns[0])) for i in range(len(columns))] datas.append(data) datas = normalize(datas) matches = [[i] for i, m in enumerate(clf.predict(datas)) if m == 1] if len(matches) == 0: return [], matches clst = cluster.DBSCAN(eps=20, min_samples=1) clst.fit(matches) trimmed = [idx for idx in clst.components_ if idx > w // 6 and idx < w * 5 // 6] clst = cluster.KMeans(3, init='k-means++') clst.fit(trimmed) seps = list(sorted([int(v[0]) + 25//2 for v in clst.cluster_centers_])) final_seps = [] for start, end in zip(seps, seps[1:]): if (end - start) > w // 6: final_seps.append(start) final_seps.append(seps[-1]) return final_seps, matches def train(edges, nonedges): clf = svm.SVC(gamma=.001, C=100.) X = normalize(nonedges + edges) y = [0] * len(nonedges) + [1] * len(edges) clf.fit(X, y) return clf def main(edge_dir, non_edge_dir): edges = load_images(edge_dir) nonedges = load_images(non_edge_dir) crossvalidate(edges, nonedges) clf = train(edges, nonedges) for comic in os.listdir('test'): print(comic) panels, matches = search_picture(clf, 'test/' + comic) print("\tpanels: {}".format(panels)) image = Image.open('test/' + comic).convert('RGBA') draw = ImageDraw.Draw(image) w, h = image.size for match in matches: match = match[0] draw.line((match, 0) + (match, h), fill=(0,0,255,0)) for sep in panels: draw.line((sep, 0) + (sep, h), fill=(255,0,0), width=3) image.show() return clf if __name__ == '__main__': if len(sys.argv) != 3: print('Usage: {} <edges-dir> <non-edges-dir>'.format(sys.argv[0])) sys.exit(1) edge_dir = sys.argv[1] non_edge_dir = sys.argv[2] main(edge_dir, non_edge_dir)
[ [ [ 20, 23 ], [ 1156, 1159 ], [ 2539, 2542 ] ], [ [ 25, 32 ], [ 2057, 2064 ], [ 2204, 2211 ] ], [ [ 49, 54 ], [ 238, 243 ], [ 343, 348 ], [ 1541, 1546 ], [ 1628, 1633 ], [ 2996, 3001 ] ], [ [ 56, 65 ], [ 3049, 3058 ] ], [ [ 73, 75 ], [ 162, 164 ], [ 2855, 2857 ] ], [ [ 83, 86 ], [ 3335, 3338 ], [ 3406, 3409 ], [ 3422, 3425 ], [ 3446, 3449 ], [ 3474, 3477 ] ], [ [ 94, 100 ], [ 633, 639 ], [ 656, 662 ] ], [ [ 108, 119 ], [ 2716, 2727 ], [ 2750, 2761 ] ], [ [ 461, 468 ], [ 529, 536 ] ], [ [ 484, 493 ], [ 876, 885 ], [ 1018, 1027 ], [ 1919, 1928 ], [ 2572, 2581 ] ], [ [ 600, 613 ], [ 2778, 2791 ] ], [ [ 1353, 1363 ], [ 1677, 1687 ], [ 1781, 1791 ] ], [ [ 1499, 1513 ], [ 2910, 2924 ] ], [ [ 2508, 2513 ], [ 2817, 2822 ] ], [ [ 2677, 2681 ], [ 3487, 3491 ] ], [ [ 3435, 3443 ], [ 3492, 3500 ] ], [ [ 3459, 3471 ], [ 3502, 3514 ] ], [ [ 515, 522 ], [ 574, 581 ] ] ]
import logging import os import re import xml.etree.ElementTree as ET from pathlib import Path from typing import Any, Tuple, Optional import pandas as pd from python import TOPIC_ID, SUBTOPIC, DOCUMENT_NUMBER, DOCUMENT_ID, SENTENCE_IDX, TOKEN_IDX, TOKEN_IDX_TO, \ TOKEN_IDX_FROM, TOKEN, MENTION_ID, EVENT, MENTION_TYPE, DESCRIPTION, MENTION_TYPES_ACTION logger = logging.getLogger() def read_xml(xml_path) -> Tuple[Any, Any, Any, Any, Any]: tree = ET.parse(xml_path) # 1: read document info root = tree.getroot() assert root.tag == "Document" doc_filename = root.attrib["doc_name"] doc_id = root.attrib["doc_id"] m = re.match(r"(?P<topic_id>\d+)_(?P<document_number>\d+)(?P<subtopic>\w+)\.xml", doc_filename) topic_id = m.group("topic_id") subtopic = m.group("subtopic") document_number = int(m.group("document_number")) documents_index = pd.MultiIndex.from_tuples([(topic_id, subtopic, doc_id)], names=[TOPIC_ID, SUBTOPIC, DOCUMENT_ID]) documents = pd.DataFrame({DOCUMENT_ID: pd.Series(doc_id, index=documents_index), DOCUMENT_NUMBER: pd.Series(document_number, index=documents_index)}) # 2: read document content contents_rows = [] contents_index = [] for token_elmt in root.iter("token"): # index content sentence_idx = int(token_elmt.attrib["sentence"]) token_idx = int(token_elmt.attrib["number"]) contents_index.append((doc_id, sentence_idx, token_idx)) # content token = token_elmt.text contents_rows.append({TOKEN: token}) contents_index = pd.MultiIndex.from_tuples(contents_index, names=[DOCUMENT_ID, SENTENCE_IDX, TOKEN_IDX]) contents = pd.DataFrame(contents_rows, index=contents_index) # 3: read markables / mentions and entity/event descriptions mentions_rows = [] mentions_index = [] entities_events = [] for markable in root.find("Markables").getchildren(): # Don't know what this is, skip it if markable.tag == "UNKNOWN_INSTANCE_TAG": continue mention_id = int(markable.attrib["m_id"]) # there are markables without spans, these are descriptions of entities / events which we want to keep if "TAG_DESCRIPTOR" in markable.attrib.keys(): if "instance_id" in markable.attrib.keys(): entities_events.append({ EVENT: markable.attrib["instance_id"], DESCRIPTION: markable.attrib["TAG_DESCRIPTOR"] }) continue token_ids = [int(anchor.attrib["t_id"]) for anchor in markable.iter("token_anchor")] token_ids_from, token_ids_to = min(token_ids), max(token_ids) # the token_ids are cumulative token indexes, remove their cumulative nature token_indexes = contents.index.get_level_values(TOKEN_IDX).values token_idx_from = token_indexes[ token_ids_from - 1] # -1 because token_ids start at 1, so we need to access index 0 in the dataframe to find t_id 1 token_idx_to = token_indexes[ token_ids_to - 1] + 1 # additionally +1 here because we want mention spans represented as intervals [from, to[ sentence_idx = contents.index.get_level_values(SENTENCE_IDX).values[token_ids_from - 1] # resolve non-contiguous mentions is_non_contiguous_mention = len(token_ids) < token_idx_from - token_idx_to if is_non_contiguous_mention: logger.info("Converted non-contiguous mention to contiguous mention.") mentions_index.append((doc_id, mention_id)) mentions_rows.append({SENTENCE_IDX: sentence_idx, TOKEN_IDX_FROM: token_idx_from, TOKEN_IDX_TO: token_idx_to, MENTION_TYPE: markable.tag}) mentions_index = pd.MultiIndex.from_tuples(mentions_index, names=[DOCUMENT_ID, MENTION_ID]) mentions = pd.DataFrame(mentions_rows, index=mentions_index) entities_events = pd.DataFrame(entities_events).set_index(EVENT) # 4. 
read relations (clusters) clusters_rows = [] for relation in root.find("Relations").getchildren(): tags_of_interest = ["CROSS_DOC_COREF", "INTRA_DOC_COREF"] if not relation.tag in tags_of_interest: logger.info("Unexpected tag " + relation.tag) raise NotImplementedError # There are relations with tags INTRA_DOC_COREF and CROSS_DOC_COREF. The cross-doc ones have a "note" attribute. if "note" in relation.attrib: # this is the case for CROSS_DOC_COREF tags relation_id = relation.attrib["note"] else: # this is the case for INTRA_DOC_COREF tags relation_id = doc_id + "_" + relation.attrib["r_id"] for mention in relation.iter("source"): mention_id = int(mention.attrib["m_id"]) clusters_rows.append({EVENT: relation_id, DOCUMENT_ID: doc_id, MENTION_ID: mention_id}) clusters = pd.DataFrame(clusters_rows) # 5. create relations for singletons # In ECB plus, there are ACTION_OCCURRENCE markables which are not assigned to a relation. These are singletons. We # add one entry for each singleton to `clusters` to ensure consistency. Note that the opposite also exists: # singleton mentions which are marked as participating in a cross-doc coref relation, but there is no second # mention for this relation. if clusters.empty: singletons = mentions.index.to_frame().reset_index(drop=True) else: # This can most likely be done in a nicer way using some index difference... outer = pd.merge(mentions, clusters, left_index=True, right_on=[DOCUMENT_ID, MENTION_ID], how="outer") singletons = outer.loc[outer[EVENT].isna(), [DOCUMENT_ID, MENTION_ID]] singletons[EVENT] = "SINGLETON_" + singletons.astype(str).apply("_".join, axis=1) clusters = clusters.append(singletons, sort=False).reset_index(drop=True) return documents, contents, mentions, clusters, entities_events def read_split_data(root: Path, sentence_filter_csv: Optional[Path]): documents = [] contents = [] mentions = [] clusters = [] entities_events = [] # enumerate files for root, dirs, files in os.walk(str(root.absolute())): for file in files: path = os.path.abspath(os.path.join(root, file)) f_documents, f_contents, f_mentions, f_clusters, f_entities_events = read_xml(path) documents.append(f_documents) contents.append(f_contents) mentions.append(f_mentions) clusters.append(f_clusters) entities_events.append(f_entities_events) documents = pd.concat(documents).sort_index() contents = pd.concat(contents).sort_index() mentions = pd.concat(mentions).sort_index() clusters = pd.concat(clusters, sort=False) entities_events = pd.concat(entities_events).sort_index() # assert that every mention participates only in one cluster -> meaning we can just add an 'EVENT' column to each mention assert clusters.duplicated(subset=[DOCUMENT_ID, MENTION_ID]).value_counts().get(True, 0) == 0 clusters = clusters.set_index([DOCUMENT_ID, MENTION_ID]) mentions = pd.merge(mentions, clusters, left_index=True, right_index=True).sort_index() # read file which tells us from which sentences we should keep event mentions if sentence_filter_csv is not None: sent_filter = pd.read_csv(sentence_filter_csv) doc_number_and_subtopic = sent_filter["File"].str.split("ecb", expand=True) doc_number_and_subtopic.columns = [DOCUMENT_NUMBER, SUBTOPIC] doc_number_and_subtopic[DOCUMENT_NUMBER] = doc_number_and_subtopic[DOCUMENT_NUMBER].astype(int) doc_number_and_subtopic[SUBTOPIC].replace({"plus": "ecbplus", "": "ecb"}, inplace=True) sent_filter = pd.concat([sent_filter.drop(columns="File"), doc_number_and_subtopic], axis=1) sent_filter.rename(columns={"Topic": TOPIC_ID, "Sentence Number": SENTENCE_IDX}, 
inplace=True) sent_filter[TOPIC_ID] = sent_filter[TOPIC_ID].astype(str) sent_filter = sent_filter[[TOPIC_ID, SUBTOPIC, DOCUMENT_NUMBER, SENTENCE_IDX]] # the sentence filter file applies to all splits, remove those topics that we don't have in the split we're loading topics_in_split = documents.index.get_level_values(TOPIC_ID).unique() sent_filter = sent_filter.loc[sent_filter[TOPIC_ID].isin(topics_in_split)].copy() # obtain doc-id from topic+subtopic+document number documents_with_doc_number_in_index = documents.set_index(DOCUMENT_NUMBER, append=True).reset_index(level=DOCUMENT_ID, drop=True).sort_index() sent_filter[DOCUMENT_ID] = sent_filter[[TOPIC_ID, SUBTOPIC, DOCUMENT_NUMBER]].apply(lambda row: documents_with_doc_number_in_index[DOCUMENT_ID].loc[tuple(row.values)], axis=1) all_mentions_to_keep = [] for doc_id, df in mentions.groupby(DOCUMENT_ID): sentences_to_keep = sent_filter.loc[sent_filter[DOCUMENT_ID] == doc_id] # we only remove action phrases and leave the other mentions in place, so that we can potentially mask them for # analysis, see python.handwritten_baseline.pipeline.data.processing.masking.MentionMaskingStage is_official_evaluation_sentence = df[SENTENCE_IDX].isin(sentences_to_keep[SENTENCE_IDX]) is_action_mention = df[MENTION_TYPE].isin(MENTION_TYPES_ACTION) mentions_to_keep = df.loc[is_official_evaluation_sentence | (~is_action_mention)] all_mentions_to_keep.append(mentions_to_keep) mentions = pd.concat(all_mentions_to_keep).sort_index() return documents, contents, mentions, entities_events
[ [ [ 7, 14 ], [ 371, 378 ] ], [ [ 22, 24 ], [ 6371, 6373 ], [ 6448, 6450 ], [ 6464, 6466 ] ], [ [ 32, 34 ], [ 656, 658 ] ], [ [ 42, 69 ], [ 462, 464 ] ], [ [ 90, 94 ], [ 6177, 6181 ], [ 6213, 6217 ] ], [ [ 114, 117 ], [ 425, 428 ], [ 430, 433 ], [ 435, 438 ], [ 440, 443 ], [ 445, 448 ] ], [ [ 119, 124 ], [ 419, 424 ] ], [ [ 126, 134 ], [ 6204, 6212 ] ], [ [ 143, 155 ], [ 896, 898 ], [ 1059, 1061 ], [ 1086, 1088 ], [ 1175, 1177 ], [ 1665, 1667 ], [ 1768, 1770 ], [ 3935, 3937 ], [ 4025, 4027 ], [ 4097, 4099 ], [ 5090, 5092 ], [ 5742, 5744 ], [ 6820, 6822 ], [ 6869, 6871 ], [ 6917, 6919 ], [ 6965, 6967 ], [ 7019, 7021 ], [ 7361, 7363 ], [ 7583, 7585 ], [ 7992, 7994 ], [ 9773, 9775 ] ], [ [ 176, 184 ], [ 1009, 1017 ], [ 8116, 8124 ], [ 8218, 8226 ], [ 8194, 8202 ], [ 8275, 8283 ], [ 8511, 8519 ], [ 8580, 8588 ], [ 8879, 8887 ] ], [ [ 186, 194 ], [ 1019, 1027 ], [ 7760, 7768 ], [ 7906, 7914 ], [ 8285, 8293 ], [ 8889, 8897 ] ], [ [ 196, 211 ], [ 1158, 1173 ], [ 7743, 7758 ], [ 7845, 7860 ], [ 7802, 7817 ], [ 8295, 8310 ], [ 8746, 8761 ], [ 8899, 8914 ] ], [ [ 213, 224 ], [ 1029, 1040 ], [ 1073, 1084 ], [ 1714, 1725 ], [ 3984, 3995 ], [ 5029, 5040 ], [ 5798, 5809 ], [ 5890, 5901 ], [ 7225, 7236 ], [ 7320, 7331 ], [ 8794, 8805 ], [ 8851, 8862 ], [ 9093, 9104 ], [ 9167, 9178 ], [ 8970, 8981 ] ], [ [ 226, 238 ], [ 1727, 1739 ], [ 3336, 3348 ], [ 3707, 3719 ], [ 8145, 8157 ], [ 8312, 8324 ], [ 9474, 9486 ], [ 9511, 9523 ] ], [ [ 240, 249 ], [ 1741, 1750 ], [ 2916, 2925 ] ], [ [ 251, 263 ], [ 3827, 3839 ] ], [ [ 271, 285 ], [ 3765, 3779 ] ], [ [ 287, 292 ], [ 1629, 1634 ] ], [ [ 294, 304 ], [ 3997, 4007 ], [ 5050, 5060 ], [ 5811, 5821 ], [ 5903, 5913 ], [ 7238, 7248 ], [ 7333, 7343 ] ], [ [ 306, 311 ], [ 2464, 2469 ], [ 4137, 4142 ], [ 5009, 5014 ], [ 5874, 5879 ], [ 5931, 5936 ] ], [ [ 313, 325 ], [ 3885, 3897 ], [ 9561, 9573 ] ], [ [ 327, 338 ], [ 2523, 2534 ] ], [ [ 340, 360 ], [ 9580, 9600 ] ], [ [ 362, 368 ], [ 3553, 3559 ], [ 4388, 4394 ] ], [ [ 397, 405 ], [ 6571, 6579 ] ], [ [ 6155, 6170 ] ] ]
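A hedged sketch of how the ECB+ loaders above might be driven. The directory and CSV paths are placeholders, and `read_split_data` is assumed to be importable from (or run in) the module shown:

from pathlib import Path

# Placeholder locations; the corpus layout is whatever os.walk() should traverse.
split_root = Path("data/ecbplus/train")
sentence_csv = Path("data/ecbplus/evaluation_sentences.csv")  # may also be None

documents, contents, mentions, entities_events = read_split_data(split_root, sentence_csv)
print(len(documents), "documents,", len(mentions), "mentions")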
""" Django settings for simplesocial project. Generated by 'django-admin startproject' using Django 2.0.5. For more information on this file, see https://docs.djangoproject.com/en/2.0/topics/settings/ For the full list of settings and their values, see https://docs.djangoproject.com/en/2.0/ref/settings/ """ import os # Build paths inside the project like this: os.path.join(BASE_DIR, ...) BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__))) TEMPLAE_DIR = os.path.join(BASE_DIR, 'templates') # Quick-start development settings - unsuitable for production # See https://docs.djangoproject.com/en/2.0/howto/deployment/checklist/ # SECURITY WARNING: keep the secret key used in production secret! SECRET_KEY = '%2^#bfjd61932k^l^fz4ztt+e*9ahjda(7w3xaa0d+^@a&=-6*' # SECURITY WARNING: don't run with debug turned on in production! DEBUG = True ALLOWED_HOSTS = [] # Application definition INSTALLED_APPS = [ 'django.contrib.admin', 'django.contrib.auth', 'django.contrib.contenttypes', 'django.contrib.sessions', 'django.contrib.messages', 'django.contrib.staticfiles', 'bootstrap4', 'accounts', 'groups', 'posts', ] MIDDLEWARE = [ 'django.middleware.security.SecurityMiddleware', 'django.contrib.sessions.middleware.SessionMiddleware', 'django.middleware.common.CommonMiddleware', 'django.middleware.csrf.CsrfViewMiddleware', 'django.contrib.auth.middleware.AuthenticationMiddleware', 'django.contrib.messages.middleware.MessageMiddleware', 'django.middleware.clickjacking.XFrameOptionsMiddleware', ] ROOT_URLCONF = 'simplesocial.urls' TEMPLATES = [ { 'BACKEND': 'django.template.backends.django.DjangoTemplates', 'DIRS': [TEMPLAE_DIR,], 'APP_DIRS': True, 'OPTIONS': { 'context_processors': [ 'django.template.context_processors.debug', 'django.template.context_processors.request', 'django.contrib.auth.context_processors.auth', 'django.contrib.messages.context_processors.messages', ], }, }, ] WSGI_APPLICATION = 'simplesocial.wsgi.application' # Database # https://docs.djangoproject.com/en/2.0/ref/settings/#databases DATABASES = { 'default': { 'ENGINE': 'django.db.backends.sqlite3', 'NAME': os.path.join(BASE_DIR, 'db.sqlite3'), } } # Password validation # https://docs.djangoproject.com/en/2.0/ref/settings/#auth-password-validators AUTH_PASSWORD_VALIDATORS = [ { 'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator', }, { 'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator', }, { 'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator', }, { 'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator', }, ] # Internationalization # https://docs.djangoproject.com/en/2.0/topics/i18n/ LANGUAGE_CODE = 'en-us' TIME_ZONE = 'UTC' USE_I18N = True USE_L10N = True USE_TZ = True # Static files (CSS, JavaScript, Images) # https://docs.djangoproject.com/en/2.0/howto/static-files/ STATIC_URL = '/static/' STATICFILES_DIRS = [os.path.join(BASE_DIR, 'static'),] LOGIN_REDIRECT_URL = 'test' LOGOUT_REDIRECT_URL = 'thanks'
[ [ [ 320, 322 ], [ 407, 409 ], [ 423, 425 ], [ 439, 441 ], [ 481, 483 ], [ 2355, 2357 ], [ 3248, 3250 ] ], [ [ 396, 404 ], [ 494, 502 ], [ 2368, 2376 ], [ 3261, 3269 ] ], [ [ 467, 478 ], [ 1741, 1752 ] ], [ [ 721, 731 ] ], [ [ 854, 859 ] ], [ [ 868, 881 ] ], [ [ 915, 929 ] ], [ [ 1184, 1194 ] ], [ [ 1598, 1610 ] ], [ [ 1634, 1643 ] ], [ [ 2131, 2147 ] ], [ [ 2260, 2269 ] ], [ [ 2505, 2529 ] ], [ [ 3008, 3021 ] ], [ [ 3033, 3042 ] ], [ [ 3052, 3060 ] ], [ [ 3069, 3077 ] ], [ [ 3086, 3092 ] ], [ [ 3204, 3214 ] ], [ [ 3228, 3244 ] ], [ [ 3284, 3302 ] ], [ [ 3312, 3331 ] ] ]
#!/usr/bin/env python
#coding: utf-8
import sys
from common import reverse_items

if len(sys.argv) != 3:
    print("Reverse key and value of all pairs")
    print(("Usage: ", sys.argv[0], "[input] [output]"))
    exit(1)

reverse_items(sys.argv[1], sys.argv[2])
[ [ [ 44, 47 ], [ 89, 92 ], [ 171, 174 ], [ 230, 233 ], [ 243, 246 ] ], [ [ 67, 80 ], [ 216, 229 ] ] ]
# Copyright (C) 2015-2016 Regents of the University of California # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from __future__ import print_function import os import textwrap applianceSelf = os.environ['TOIL_APPLIANCE_SELF'] sdistName = os.environ['_TOIL_SDIST_NAME'] dependencies = ' '.join(['libffi-dev', # For client side encryption for extras with PyNACL 'python3.6', 'python3.6-dev', 'python-dev', # For installing Python packages with native code 'python-pip', # Bootstrap pip, but needs upgrading, see below 'python3-pip', 'libcurl4-openssl-dev', 'libssl-dev', 'wget', 'curl', 'openssh-server', 'mesos=1.0.1-2.0.94.ubuntu1604', "nodejs", # CWL support for javascript expressions 'rsync', 'screen', 'build-essential', # We need a build environment to build Singularity 3. 'uuid-dev', 'libgpgme11-dev', 'libseccomp-dev', 'pkg-config', 'squashfs-tools', 'cryptsetup', 'git']) def heredoc(s): s = textwrap.dedent(s).format(**globals()) return s[1:] if s.startswith('\n') else s motd = heredoc(''' This is the Toil appliance. You can run your Toil script directly on the appliance. Run toil <workflow>.py --help to see all options for running your workflow. For more information see http://toil.readthedocs.io/en/latest/ Copyright (C) 2015-2018 Regents of the University of California Version: {applianceSelf} ''') # Prepare motd to be echoed in the Dockerfile using a RUN statement that uses bash's print motd = ''.join(l + '\\n\\\n' for l in motd.splitlines()) print(heredoc(''' FROM ubuntu:16.04 RUN apt-get -y update --fix-missing && apt-get -y upgrade && apt-get -y install apt-transport-https ca-certificates software-properties-common && apt-get clean && rm -rf /var/lib/apt/lists/* RUN echo "deb http://repos.mesosphere.io/ubuntu/ xenial main" \ > /etc/apt/sources.list.d/mesosphere.list \ && apt-key adv --keyserver keyserver.ubuntu.com --recv E56151BF \ && echo "deb http://deb.nodesource.com/node_6.x xenial main" \ > /etc/apt/sources.list.d/nodesource.list \ && apt-key adv --keyserver keyserver.ubuntu.com --recv 68576280 RUN add-apt-repository -y ppa:deadsnakes/ppa RUN apt-get -y update --fix-missing && \ DEBIAN_FRONTEND=noninteractive apt-get -y upgrade && \ DEBIAN_FRONTEND=noninteractive apt-get -y install {dependencies} && \ apt-get clean && \ rm -rf /var/lib/apt/lists/* RUN wget https://dl.google.com/go/go1.13.3.linux-amd64.tar.gz && \ tar xvf go1.13.3.linux-amd64.tar.gz && \ mv go/bin/* /usr/bin/ && \ mv go /usr/local/ RUN mkdir -p $(go env GOPATH)/src/github.com/sylabs && \ cd $(go env GOPATH)/src/github.com/sylabs && \ git clone https://github.com/sylabs/singularity.git && \ cd singularity && \ git checkout v3.4.2 && \ ./mconfig && \ cd ./builddir && \ make -j4 && \ make install RUN mkdir /root/.ssh && \ chmod 700 /root/.ssh ADD waitForKey.sh /usr/bin/waitForKey.sh ADD customDockerInit.sh /usr/bin/customDockerInit.sh RUN chmod 777 /usr/bin/waitForKey.sh && chmod 777 /usr/bin/customDockerInit.sh # The stock pip is too old and can't install from sdist with extras RUN pip install --upgrade pip==9.0.1 # Default 
setuptools is too old RUN pip install --upgrade setuptools==36.5.0 # Include virtualenv, as it is still the recommended way to deploy pipelines RUN pip install --upgrade virtualenv==15.0.3 # Install s3am (--never-download prevents silent upgrades to pip, wheel and setuptools) RUN virtualenv --never-download /home/s3am \ && /home/s3am/bin/pip install s3am==2.0 \ && ln -s /home/s3am/bin/s3am /usr/local/bin/ # Install statically linked version of docker client RUN curl https://download.docker.com/linux/static/stable/x86_64/docker-18.06.1-ce.tgz \ | tar -xvzf - --transform='s,[^/]*/,,g' -C /usr/local/bin/ \ && chmod u+x /usr/local/bin/docker # Fix for Mesos interface dependency missing on ubuntu RUN pip install protobuf==3.0.0 # Fix for https://issues.apache.org/jira/browse/MESOS-3793 ENV MESOS_LAUNCHER=posix # Fix for `screen` (https://github.com/BD2KGenomics/toil/pull/1386#issuecomment-267424561) ENV TERM linux # Run bash instead of sh inside of screen ENV SHELL /bin/bash RUN echo "defshell -bash" > ~/.screenrc # An appliance may need to start more appliances, e.g. when the leader appliance launches the # worker appliance on a worker node. To support this, we embed a self-reference into the image: ENV TOIL_APPLIANCE_SELF {applianceSelf} RUN mkdir /var/lib/toil ENV TOIL_WORKDIR /var/lib/toil # This component changes most frequently and keeping it last maximizes Docker cache hits. COPY {sdistName} . RUN pip install {sdistName}[all] RUN rm {sdistName} # We intentionally inherit the default ENTRYPOINT and CMD from the base image, to the effect # that the running appliance just gives you a shell. To start the Mesos master or slave # daemons, the user # should override the entrypoint via --entrypoint. RUN echo '[ ! -z "$TERM" -a -r /etc/motd ] && cat /etc/motd' >> /etc/bash.bashrc \ && printf '{motd}' > /etc/motd '''))
[ [ [ 636, 650 ] ], [ [ 658, 660 ], [ 694, 696 ], [ 740, 742 ] ], [ [ 668, 676 ], [ 1965, 1973 ] ], [ [ 678, 691 ] ], [ [ 728, 737 ] ], [ [ 773, 785 ] ], [ [ 1945, 1952 ], [ 2059, 2066 ], [ 2569, 2576 ] ], [ [ 2052, 2056 ], [ 2543, 2547 ] ], [ [ 2505, 2509 ] ] ]
# mypy: allow-untyped-defs

from unittest import mock

import pytest

from tools.ci.tc import decision


@pytest.mark.parametrize("run_jobs,tasks,expected", [
    ([], {"task-no-schedule-if": {}}, ["task-no-schedule-if"]),
    ([], {"task-schedule-if-no-run-job": {"schedule-if": {}}}, []),
    (["job"], {"job-present": {"schedule-if": {"run-job": ["other-job", "job"]}}}, ["job-present"]),
    (["job"], {"job-missing": {"schedule-if": {"run-job": ["other-job"]}}}, []),
    (["all"], {"job-all": {"schedule-if": {"run-job": ["other-job"]}}}, ["job-all"]),
    (["job"], {"job-1": {"schedule-if": {"run-job": ["job"]}},
               "job-2": {"schedule-if": {"run-job": ["other-job"]}}}, ["job-1"]),
])
def test_filter_schedule_if(run_jobs, tasks, expected):
    with mock.patch("tools.ci.tc.decision.get_run_jobs",
                    return_value=run_jobs) as get_run_jobs:
        assert (decision.filter_schedule_if({}, tasks) ==
                {name: tasks[name] for name in expected})
        get_run_jobs.call_count in (0, 1)


@pytest.mark.parametrize("msg,expected", [
    ("Some initial line\n\ntc-jobs:foo,bar", {"foo", "bar"}),
    ("Some initial line\n\ntc-jobs:foo, bar", {"foo", "bar"}),
    ("tc-jobs:foo, bar \nbaz", {"foo", "bar"}),
    ("tc-jobs:all", {"all"}),
    ("", set()),
    ("tc-jobs:foo\ntc-jobs:bar", {"foo"})])
@pytest.mark.parametrize("event", [
    {"commits": [{"message": "<message>"}]},
    {"pull_request": {"body": "<message>"}}
])
def test_extra_jobs_pr(msg, expected, event):
    def sub(obj):
        """Copy obj, except if it's a string with the value <message>
        replace it with the value of the msg argument"""
        if isinstance(obj, dict):
            return {key: sub(value) for (key, value) in obj.items()}
        elif isinstance(obj, list):
            return [sub(value) for value in obj]
        elif obj == "<message>":
            return msg
        return obj

    event = sub(event)

    assert decision.get_extra_jobs(event) == expected
[ [ [ 49, 53 ], [ 783, 787 ] ], [ [ 62, 68 ], [ 106, 112 ], [ 1052, 1058 ], [ 1361, 1367 ] ], [ [ 94, 102 ], [ 907, 915 ], [ 1978, 1986 ] ], [ [ 722, 745 ] ], [ [ 1492, 1510 ] ] ]
""" Main inputs: (Change for all fields) """ eazypath = '/data2/ken/photoz/eazy-photoz/src/eazy ' working_folder = '/data2/ken/EN1_pani' photometry_catalog = 'en1_phot_with_zspec.fits' photometry_format = 'fits' filter_file = 'EN1_filters.res' translate_file = 'EN1.translate' zspec_col = 'z_spec' flux_col = 'flux' fluxerr_col ='fluxerr' do_zp = False do_zp_tests = False do_subcats = False do_full = False do_stellar = False do_hb = True do_merge = True """ Training parameters """ Ncrossval = 1 test_fraction = 0.2 process_outliers = True correct_extinction = True """ Fitting Parameters (Change only when needed) """ # Templates: Any combination of 'eazy', 'swire', 'atlas' templates = ['eazy', 'atlas', 'cosmos']#, 'swire']#, 'cosmos', 'atlas'] #,'cosmos', 'atlas'] fitting_mode = ['a', '1', '1'] defaults = ['defaults/zphot.eazy', 'defaults/zphot.atlas', 'defaults/zphot.cosmos'] #'defaults/zphot.eazy', #'defaults/zphot.atlas', #'defaults/zphot.swire'] stellar_params = 'defaults/zphot.pickles' additional_errors = [0.0, 0.0, 0.0] template_error_norm = [1., 1., 1.] template_error_file = '' lambda_fit_max = [5., 30., 30.] """ Combination Parameters """ include_prior = True fbad_prior = 'mag' # 'flat', 'vol' or 'mag' #prior_parameter_path = 'en1_i_prior_coeff.npz' prior_fname = 'pani_mag' prior_colname = 'pani_mag' alpha_colname = 'pani_mag' """ System Parameters (Specific system only - fixed after installation) """ block_size = 1e4 ncpus = 10
[ [ [ 46, 54 ] ], [ [ 99, 113 ] ], [ [ 138, 156 ] ], [ [ 186, 203 ] ], [ [ 214, 225 ] ], [ [ 246, 260 ] ], [ [ 280, 289 ] ], [ [ 302, 310 ] ], [ [ 320, 331 ] ], [ [ 344, 349 ] ], [ [ 358, 369 ] ], [ [ 378, 388 ] ], [ [ 398, 405 ] ], [ [ 414, 424 ] ], [ [ 433, 438 ] ], [ [ 446, 454 ] ], [ [ 492, 501 ] ], [ [ 506, 519 ] ], [ [ 527, 543 ] ], [ [ 551, 569 ] ], [ [ 690, 699 ] ], [ [ 783, 795 ] ], [ [ 815, 823 ] ], [ [ 1034, 1048 ] ], [ [ 1077, 1094 ] ], [ [ 1113, 1132 ] ], [ [ 1148, 1167 ] ], [ [ 1173, 1187 ] ], [ [ 1240, 1253 ] ], [ [ 1261, 1271 ] ], [ [ 1353, 1364 ] ], [ [ 1378, 1391 ] ], [ [ 1405, 1418 ] ], [ [ 1512, 1522 ] ], [ [ 1529, 1534 ] ] ]
from bfstpw.models import ForumThread, ForumPost

from datetime import datetime

from django.conf import settings
from django.core.paginator import Paginator
from django.core.urlresolvers import reverse
from django.db.models import Count
from django.http import HttpResponseRedirect
from django.shortcuts import render, get_object_or_404
from django.template import Context
import django.utils.timezone

from markdown import markdown

def threadlist(request):
    c = Context({"threadlist" : [{"id":t.id
        ,"name":t.thread_title
        ,"poster":t.getOriginalPost().poster
        ,"replycount":t.postcount
        ,"lastpage":(t.postcount / getattr(settings, 'BFSTPW_MAX_POSTS_PER_PAGE', 20))+1
        ,"date":t.mostrecent
        ,"lastposter":t.getLatestPost().poster
        } for t in ForumThread.objects.sortByLastPost().annotate(postcount=Count('forumpost'))]})
    return render(request, 'bfstpw/threadlist.html', c)

def thread(request, thread_id, message=''):
    current_thread = get_object_or_404(ForumThread, id=thread_id)
    max_posts_per_page = getattr(settings, 'BFSTPW_MAX_POSTS_PER_PAGE', 20)
    paginator = Paginator(current_thread.forumpost_set.order_by('date_posted'), max_posts_per_page)
    c = Context(
        {"threadlist" : [{"id":t.id
            ,"name":t.thread_title
            } for t in ForumThread.objects.sortByLastPost()],
         "thread" : current_thread,
         "posts" : paginator.page(request.GET.get('page',1)),
         "pages" : paginator.page_range,
         "message" : message
        })
    return render(request, 'bfstpw/thread.html', c)

def post(request, thread_id):
    t = get_object_or_404(ForumThread, id=thread_id)
    posts = t.forumpost_set

    """
    # TODO: don't let users post too quickly
    session = request.session
    current_time = time()
    if (session.get('lastposttime',0) + 10) < current_time:
        message_html = markdown(request.POST['message'], safe_mode='escape')
        posts.create(poster=request.POST['name'],message_body=message_html,date_posted=datetime.now())
        msg = ''
        session['lastposttime'] = current_time
    else:
        msg = "Error: you must wait 10 seconds before posting"
    """

    message_html = markdown(request.POST['message'], safe_mode='escape')
    posts.create(poster=request.POST['name'],
        message_body=message_html,
        date_posted=django.utils.timezone.now())
    pagenum = (posts.count() / getattr(settings, 'BFSTPW_MAX_POSTS_PER_PAGE', 20))+1
    return HttpResponseRedirect(reverse('bfstpw-thread', args=(t.id,))+'?page=%d' % pagenum)

def newthreadmake(request):
    t = ForumThread(thread_title=request.POST['threadname'])
    t.save()
    message_html = markdown(request.POST['message'], safe_mode='escape')
    t.forumpost_set.create(poster=request.POST['name'],
        message_body=message_html,
        date_posted=django.utils.timezone.now())
    return HttpResponseRedirect(reverse('bfstpw-thread', args=(t.id,)))
[ [ [ 26, 37 ], [ 822, 833 ], [ 1042, 1053 ], [ 1386, 1397 ], [ 1745, 1756 ], [ 2711, 2722 ] ], [ [ 39, 48 ] ], [ [ 70, 78 ] ], [ [ 103, 111 ], [ 666, 674 ], [ 1102, 1110 ], [ 2535, 2543 ] ], [ [ 146, 155 ], [ 1161, 1170 ] ], [ [ 193, 200 ], [ 2613, 2620 ], [ 3031, 3038 ] ], [ [ 230, 235 ], [ 878, 883 ] ], [ [ 260, 280 ], [ 2592, 2612 ], [ 3010, 3030 ] ], [ [ 310, 316 ], [ 913, 919 ], [ 1647, 1653 ] ], [ [ 318, 335 ], [ 1024, 1041 ], [ 1727, 1744 ] ], [ [ 364, 371 ], [ 465, 472 ], [ 1253, 1260 ] ], [ [ 379, 400 ], [ 2467, 2473 ], [ 2969, 2975 ] ], [ [ 422, 430 ], [ 2316, 2324 ], [ 2796, 2804 ] ], [ [ 436, 446 ] ], [ [ 963, 969 ] ], [ [ 1693, 1697 ] ], [ [ 2679, 2692 ] ] ]
from lib.utils.util import * from timm.models.efficientnet_blocks import * # ChildNet Builder definition. class ChildNetBuilder: def __init__( self, channel_multiplier=1.0, channel_divisor=8, channel_min=None, output_stride=32, pad_type='', act_layer=None, se_kwargs=None, norm_layer=nn.BatchNorm2d, norm_kwargs=None, drop_path_rate=0., feature_location='', verbose=False, logger=None): self.channel_multiplier = channel_multiplier self.channel_divisor = channel_divisor self.channel_min = channel_min self.output_stride = output_stride self.pad_type = pad_type self.act_layer = act_layer self.se_kwargs = se_kwargs self.norm_layer = norm_layer self.norm_kwargs = norm_kwargs self.drop_path_rate = drop_path_rate self.feature_location = feature_location assert feature_location in ('pre_pwl', 'post_exp', '') self.verbose = verbose self.in_chs = None self.features = OrderedDict() self.logger = logger def _round_channels(self, chs): return round_channels( chs, self.channel_multiplier, self.channel_divisor, self.channel_min) def _make_block(self, ba, block_idx, block_count): drop_path_rate = self.drop_path_rate * block_idx / block_count bt = ba.pop('block_type') ba['in_chs'] = self.in_chs ba['out_chs'] = self._round_channels(ba['out_chs']) if 'fake_in_chs' in ba and ba['fake_in_chs']: ba['fake_in_chs'] = self._round_channels(ba['fake_in_chs']) ba['norm_layer'] = self.norm_layer ba['norm_kwargs'] = self.norm_kwargs ba['pad_type'] = self.pad_type # block act fn overrides the model default ba['act_layer'] = ba['act_layer'] if ba['act_layer'] is not None else self.act_layer assert ba['act_layer'] is not None if bt == 'ir': ba['drop_path_rate'] = drop_path_rate ba['se_kwargs'] = self.se_kwargs if self.verbose: self.logger.info( ' InvertedResidual {}, Args: {}'.format( block_idx, str(ba))) block = InvertedResidual(**ba) elif bt == 'ds' or bt == 'dsa': ba['drop_path_rate'] = drop_path_rate ba['se_kwargs'] = self.se_kwargs if self.verbose: self.logger.info( ' DepthwiseSeparable {}, Args: {}'.format( block_idx, str(ba))) block = DepthwiseSeparableConv(**ba) elif bt == 'cn': if self.verbose: self.logger.info( ' ConvBnAct {}, Args: {}'.format( block_idx, str(ba))) block = ConvBnAct(**ba) else: assert False, 'Uknkown block type (%s) while building model.' % bt self.in_chs = ba['out_chs'] # update in_chs for arg of next block return block def __call__(self, in_chs, model_block_args): """ Build the blocks Args: in_chs: Number of input-channels passed to first block model_block_args: A list of lists, outer list defines stages, inner list contains strings defining block configuration(s) Return: List of block stacks (each stack wrapped in nn.Sequential) """ if self.verbose: self.logger.info( 'Building model trunk with %d stages...' 
% len(model_block_args)) self.in_chs = in_chs total_block_count = sum([len(x) for x in model_block_args]) total_block_idx = 0 current_stride = 2 current_dilation = 1 feature_idx = 0 stages = [] # outer list of block_args defines the stacks ('stages' by some # conventions) for stage_idx, stage_block_args in enumerate(model_block_args): last_stack = stage_idx == (len(model_block_args) - 1) if self.verbose: self.logger.info('Stack: {}'.format(stage_idx)) assert isinstance(stage_block_args, list) blocks = [] # each stack (stage) contains a list of block arguments for block_idx, block_args in enumerate(stage_block_args): last_block = block_idx == (len(stage_block_args) - 1) extract_features = '' # No features extracted if self.verbose: self.logger.info(' Block: {}'.format(block_idx)) # Sort out stride, dilation, and feature extraction details assert block_args['stride'] in (1, 2) if block_idx >= 1: # only the first block in any stack can have a stride > 1 block_args['stride'] = 1 do_extract = False if self.feature_location == 'pre_pwl': if last_block: next_stage_idx = stage_idx + 1 if next_stage_idx >= len(model_block_args): do_extract = True else: do_extract = model_block_args[next_stage_idx][0]['stride'] > 1 elif self.feature_location == 'post_exp': if block_args['stride'] > 1 or (last_stack and last_block): do_extract = True if do_extract: extract_features = self.feature_location next_dilation = current_dilation if block_args['stride'] > 1: next_output_stride = current_stride * block_args['stride'] if next_output_stride > self.output_stride: next_dilation = current_dilation * block_args['stride'] block_args['stride'] = 1 if self.verbose: self.logger.info( ' Converting stride to dilation to maintain output_stride=={}'.format( self.output_stride)) else: current_stride = next_output_stride block_args['dilation'] = current_dilation if next_dilation != current_dilation: current_dilation = next_dilation # create the block block = self._make_block( block_args, total_block_idx, total_block_count) blocks.append(block) # stash feature module name and channel info for model feature # extraction if extract_features: feature_module = block.feature_module(extract_features) if feature_module: feature_module = 'blocks.{}.{}.'.format( stage_idx, block_idx) + feature_module feature_channels = block.feature_channels(extract_features) self.features[feature_idx] = dict( name=feature_module, num_chs=feature_channels ) feature_idx += 1 # incr global block idx (across all stacks) total_block_idx += 1 stages.append(nn.Sequential(*blocks)) return stages
[ [ [ 27, 28 ] ], [ [ 74, 75 ], [ 398, 400 ], [ 1161, 1172 ], [ 1256, 1270 ], [ 2394, 2410 ], [ 2744, 2766 ], [ 2981, 2990 ], [ 7585, 7587 ] ], [ [ 114, 129 ] ] ]
# coding: utf-8 # Copyright 2015 Eezee-It import json import logging from hashlib import sha256 import urlparse from odoo import models, fields, api from odoo.tools.float_utils import float_compare from odoo.tools.translate import _ from odoo.addons.payment.models.payment_acquirer import ValidationError from odoo.addons.payment_sips.controllers.main import SipsController _logger = logging.getLogger(__name__) CURRENCY_CODES = { 'EUR': '978', 'USD': '840', 'CHF': '756', 'GBP': '826', 'CAD': '124', 'JPY': '392', 'MXN': '484', 'TRY': '949', 'AUD': '036', 'NZD': '554', 'NOK': '578', 'BRL': '986', 'ARS': '032', 'KHR': '116', 'TWD': '901', } class AcquirerSips(models.Model): _inherit = 'payment.acquirer' provider = fields.Selection(selection_add=[('sips', 'Sips')]) sips_merchant_id = fields.Char('SIPS API User Password', required_if_provider='sips', groups='base.group_user') sips_secret = fields.Char('SIPS Secret', size=64, required_if_provider='sips', groups='base.group_user') def _get_sips_urls(self, environment): """ Worldline SIPS URLS """ url = { 'prod': 'https://payment-webinit.sips-atos.com/paymentInit', 'test': 'https://payment-webinit.simu.sips-atos.com/paymentInit', } return {'sips_form_url': url.get(environment, url['test']), } def _sips_generate_shasign(self, values): """ Generate the shasign for incoming or outgoing communications. :param dict values: transaction values :return string: shasign """ if self.provider != 'sips': raise ValidationError(_('Incorrect payment acquirer provider')) data = values['Data'] # Test key provided by Worldine key = u'002001000000001_KEY1' if self.environment == 'prod': key = getattr(self, 'sips_secret') shasign = sha256(data + key) return shasign.hexdigest() @api.multi def sips_form_generate_values(self, values): self.ensure_one() base_url = self.env['ir.config_parameter'].sudo().get_param('web.base.url') currency = self.env['res.currency'].sudo().browse(values['currency_id']) currency_code = CURRENCY_CODES.get(currency.name, False) if not currency_code: raise ValidationError(_('Currency not supported by Wordline')) amount = int(values['amount'] * 100) if self.environment == 'prod': # For production environment, key version 2 is required merchant_id = getattr(self, 'sips_merchant_id') key_version = '2' else: # Test key provided by Atos Wordline works only with version 1 merchant_id = '002001000000001' key_version = '1' sips_tx_values = dict(values) sips_tx_values.update({ 'Data': u'amount=%s|' % amount + u'currencyCode=%s|' % currency_code + u'merchantId=%s|' % merchant_id + u'normalReturnUrl=%s|' % urlparse.urljoin(base_url, SipsController._return_url) + u'automaticResponseUrl=%s|' % urlparse.urljoin(base_url, SipsController._return_url) + u'transactionReference=%s|' % values['reference'] + u'statementReference=%s|' % values['reference'] + u'keyVersion=%s' % key_version, 'InterfaceVersion': 'HP_2.3', }) return_context = {} if sips_tx_values.get('return_url'): return_context[u'return_url'] = u'%s' % sips_tx_values.pop('return_url') return_context[u'reference'] = u'%s' % sips_tx_values['reference'] sips_tx_values['Data'] += u'|returnContext=%s' % (json.dumps(return_context)) shasign = self._sips_generate_shasign(sips_tx_values) sips_tx_values['Seal'] = shasign return sips_tx_values @api.multi def sips_get_form_action_url(self): self.ensure_one() return self._get_sips_urls(self.environment)['sips_form_url'] class TxSips(models.Model): _inherit = 'payment.transaction' _sips_valid_tx_status = ['00'] _sips_wait_tx_status = ['90', '99'] 
_sips_refused_tx_status = ['05', '14', '34', '54', '75', '97'] _sips_error_tx_status = ['03', '12', '24', '25', '30', '40', '51', '63', '94'] _sips_pending_tx_status = ['60'] _sips_cancel_tx_status = ['17'] # -------------------------------------------------- # FORM RELATED METHODS # -------------------------------------------------- def _sips_data_to_object(self, data): res = {} for element in data.split('|'): element_split = element.split('=') res[element_split[0]] = element_split[1] return res @api.model def _sips_form_get_tx_from_data(self, data): """ Given a data dict coming from sips, verify it and find the related transaction record. """ data = self._sips_data_to_object(data.get('Data')) reference = data.get('transactionReference') if not reference: custom = json.loads(data.pop('returnContext', False) or '{}') reference = custom.get('reference') payment_tx = self.search([('reference', '=', reference)]) if not payment_tx or len(payment_tx) > 1: error_msg = _('Sips: received data for reference %s') % reference if not payment_tx: error_msg += _('; no order found') else: error_msg += _('; multiple order found') _logger.error(error_msg) raise ValidationError(error_msg) return payment_tx @api.multi def _sips_form_get_invalid_parameters(self, data): invalid_parameters = [] data = self._sips_data_to_object(data.get('Data')) # TODO: txn_id: should be false at draft, set afterwards, and verified with txn details if self.acquirer_reference and data.get('transactionReference') != self.acquirer_reference: invalid_parameters.append(('transactionReference', data.get('transactionReference'), self.acquirer_reference)) # check what is bought if float_compare(float(data.get('amount', '0.0')) / 100, self.amount, 2) != 0: invalid_parameters.append(('amount', data.get('amount'), '%.2f' % self.amount)) if self.partner_reference and data.get('customerId') != self.partner_reference: invalid_parameters.append(('customerId', data.get('customerId'), self.partner_reference)) return invalid_parameters @api.multi def _sips_form_validate(self, data): data = self._sips_data_to_object(data.get('Data')) status = data.get('responseCode') data = { 'acquirer_reference': data.get('transactionReference'), 'partner_reference': data.get('customerId'), 'date_validate': data.get('transactionDateTime', fields.Datetime.now()) } res = False if status in self._sips_valid_tx_status: msg = 'Payment for tx ref: %s, got response [%s], set as done.' % \ (self.reference, status) _logger.info(msg) data.update(state='done', state_message=msg) res = True elif status in self._sips_error_tx_status: msg = 'Payment for tx ref: %s, got response [%s], set as ' \ 'error.' % (self.reference, status) data.update(state='error', state_message=msg) elif status in self._sips_wait_tx_status: msg = 'Received wait status for payment ref: %s, got response ' \ '[%s], set as error.' % (self.reference, status) data.update(state='error', state_message=msg) elif status in self._sips_refused_tx_status: msg = 'Received refused status for payment ref: %s, got response' \ ' [%s], set as error.' % (self.reference, status) data.update(state='error', state_message=msg) elif status in self._sips_pending_tx_status: msg = 'Payment ref: %s, got response [%s] set as pending.' \ % (self.reference, status) data.update(state='pending', state_message=msg) elif status in self._sips_cancel_tx_status: msg = 'Received notification for payment ref: %s, got response ' \ '[%s], set as cancel.' 
% (self.reference, status) data.update(state='cancel', state_message=msg) else: msg = 'Received unrecognized status for payment ref: %s, got ' \ 'response [%s], set as error.' % (self.reference, status) data.update(state='error', state_message=msg) _logger.info(msg) self.write(data) return res
[ [ [ 51, 55 ], [ 3791, 3795 ], [ 5169, 5173 ] ], [ [ 63, 70 ], [ 388, 395 ] ], [ [ 91, 97 ], [ 1930, 1936 ] ], [ [ 105, 113 ], [ 3088, 3096 ], [ 3195, 3203 ] ], [ [ 132, 138 ], [ 730, 736 ], [ 4120, 4126 ] ], [ [ 140, 146 ], [ 795, 801 ], [ 869, 875 ], [ 980, 986 ], [ 7047, 7053 ] ], [ [ 148, 151 ], [ 1990, 1993 ], [ 3959, 3962 ], [ 4838, 4841 ], [ 5736, 5739 ], [ 6654, 6657 ] ], [ [ 187, 200 ], [ 6255, 6268 ] ], [ [ 234, 235 ], [ 1673, 1674 ], [ 2369, 2370 ], [ 5411, 5412 ], [ 5525, 5526 ], [ 5594, 5595 ] ], [ [ 292, 307 ], [ 1657, 1672 ], [ 2353, 2368 ], [ 5677, 5692 ] ], [ [ 362, 376 ], [ 3115, 3129 ], [ 3222, 3236 ] ], [ [ 378, 385 ], [ 5634, 5641 ], [ 7284, 7291 ], [ 8853, 8860 ] ], [ [ 418, 432 ], [ 2264, 2278 ] ], [ [ 717, 729 ] ], [ [ 4113, 4119 ] ] ]
# Copyright 2018 The TensorFlow Probability Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================ """Tests for Generalized Pareto distribution.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import sys # Dependency imports import hypothesis as hp import hypothesis.strategies as hps import numpy as np from scipy import stats as sp_stats import tensorflow.compat.v2 as tf import tensorflow_probability as tfp from tensorflow_probability.python.internal import hypothesis_testlib as tfp_hps from tensorflow_probability.python.internal import test_util tfd = tfp.distributions # Pylint doesn't understand hps.composite. # pylint: disable=no-value-for-parameter @hps.composite def generalized_paretos(draw, batch_shape=None): if batch_shape is None: batch_shape = draw(tfp_hps.shapes()) constraints = dict( loc=tfp_hps.identity_fn, scale=tfp_hps.softplus_plus_eps(), concentration=lambda x: tf.math.tanh(x) * 0.24) # <.25==safe for variance params = draw( tfp_hps.broadcasting_params( batch_shape, params_event_ndims=dict(loc=0, scale=0, concentration=0), constraint_fn_for=constraints.get)) dist = tfd.GeneralizedPareto(validate_args=draw(hps.booleans()), **params) if dist.batch_shape != batch_shape: raise AssertionError('batch_shape mismatch: expect {} but got {}'.format( batch_shape, dist)) return dist @test_util.test_all_tf_execution_regimes class GeneralizedParetoTest(test_util.TestCase): @hp.given(generalized_paretos()) @tfp_hps.tfp_hp_settings(default_max_examples=5) def testShape(self, dist): # batch_shape == dist.batch_shape asserted in generalized_paretos() self.assertEqual(dist.batch_shape, self.evaluate(dist.batch_shape_tensor())) self.assertEqual(tf.TensorShape([]), dist.event_shape) self.assertAllEqual([], self.evaluate(dist.event_shape_tensor())) @hp.given(generalized_paretos(batch_shape=[])) @tfp_hps.tfp_hp_settings(default_max_examples=5) def testLogPDF(self, dist): xs = self.evaluate(dist.sample()) logp = dist.log_prob(xs) self.assertEqual(dist.batch_shape, logp.shape) p = dist.prob(xs) self.assertEqual(dist.batch_shape, p.shape) loc, scale, conc = self.evaluate([dist.loc, dist.scale, dist.concentration]) expected_logp = sp_stats.genpareto(conc, loc=loc, scale=scale).logpdf(xs) actual_logp = self.evaluate(logp) self.assertAllClose(expected_logp, actual_logp, rtol=1e-5) self.assertAllClose(np.exp(expected_logp), self.evaluate(p), rtol=1e-5) def testLogPDFBoundary(self): # When loc = concentration = 0, we have an exponential distribution. Check # that at 0 we have finite log prob. scale = np.array([0.1, 0.5, 1., 2., 5., 10.], dtype=np.float32) dist = tfd.GeneralizedPareto(loc=0, scale=scale, concentration=0) log_pdf = dist.log_prob(0.) 
self.assertAllClose(-np.log(scale), self.evaluate(log_pdf), rtol=1e-5) @hp.given(generalized_paretos(batch_shape=[])) @tfp_hps.tfp_hp_settings(default_max_examples=5) def testCDF(self, dist): xs = self.evaluate(dist.sample()) cdf = dist.cdf(xs) self.assertEqual(dist.batch_shape, cdf.shape) loc, scale, conc = self.evaluate([dist.loc, dist.scale, dist.concentration]) expected_cdf = sp_stats.genpareto(conc, loc=loc, scale=scale).cdf(xs) self.assertAllClose(expected_cdf, self.evaluate(cdf), rtol=5e-5) @hp.given(generalized_paretos(batch_shape=[])) @tfp_hps.tfp_hp_settings(default_max_examples=5) def testMean(self, dist): loc, scale, conc = self.evaluate([dist.loc, dist.scale, dist.concentration]) self.assertEqual(dist.batch_shape, dist.mean().shape) if np.abs(conc) < 1e-5 and conc != 0: return # scipy does badly at small nonzero concentrations. expected = sp_stats.genpareto(conc, loc=loc, scale=scale).mean() actual = self.evaluate(dist.mean()) self.assertAllClose(expected, actual, rtol=5e-4) @hp.given(generalized_paretos(batch_shape=[])) @tfp_hps.tfp_hp_settings(default_max_examples=5) def testVariance(self, dist): loc, scale, conc = self.evaluate([dist.loc, dist.scale, dist.concentration]) self.assertEqual(dist.batch_shape, dist.variance().shape) expected = sp_stats.genpareto(conc, loc=loc, scale=scale).var() if np.abs(conc) < 1e-4 and conc != 0: return # scipy does badly at small nonzero concentrations. if expected <= 0: return # scipy sometimes returns nonsense zero or negative variances. actual = self.evaluate(dist.variance()) print('var', loc, scale, conc, expected, actual, file=sys.stderr) self.assertAllClose(expected, actual, rtol=.01) @hp.given(generalized_paretos(batch_shape=[])) @tfp_hps.tfp_hp_settings(default_max_examples=5) def testEntropy(self, dist): loc, scale, conc = self.evaluate([dist.loc, dist.scale, dist.concentration]) self.assertEqual(dist.batch_shape, dist.entropy().shape) expected = sp_stats.genpareto.entropy(conc, loc=loc, scale=scale) actual = self.evaluate(dist.entropy()) self.assertAllClose(expected, actual) def testSample(self): loc = np.float32(-7.5) scale = np.float32(3.5) conc = np.float32(0.07) n = 100000 dist = tfd.GeneralizedPareto(loc=loc, scale=scale, concentration=conc) samples = dist.sample(n, seed=test_util.test_seed()) sample_values = self.evaluate(samples) self.assertEqual((n,), samples.shape) self.assertEqual((n,), sample_values.shape) self.assertTrue(self._kstest(loc, scale, conc, sample_values)) self.assertAllClose( sp_stats.genpareto.mean(conc, loc=loc, scale=scale), sample_values.mean(), rtol=.005) self.assertAllClose( sp_stats.genpareto.var(conc, loc=loc, scale=scale), sample_values.var(), rtol=.01) def testFullyReparameterized(self): loc = tf.constant(4.0) scale = tf.constant(3.0) conc = tf.constant(2.0) _, grads = tfp.math.value_and_gradient( lambda *args: tfd.GeneralizedPareto(*args).sample(100), [loc, scale, conc]) self.assertLen(grads, 3) self.assertAllNotNone(grads) def testSampleKolmogorovSmirnovMultiDimensional(self): loc = np.linspace(-10, 10, 3).reshape(3, 1, 1) scale = np.linspace(1e-6, 7, 5).reshape(5, 1) conc = np.linspace(-1.3, 1.3, 7) dist = tfd.GeneralizedPareto(loc=loc, scale=scale, concentration=conc) n = 10000 samples = dist.sample(n, seed=test_util.test_seed()) sample_values = self.evaluate(samples) self.assertEqual((n, 3, 5, 7), samples.shape) self.assertEqual((n, 3, 5, 7), sample_values.shape) fails = 0 trials = 0 for li, l in enumerate(loc.reshape(-1)): for si, s in enumerate(scale.reshape(-1)): for ci, c in 
enumerate(conc.reshape(-1)): samps = sample_values[:, li, si, ci] trials += 1 fails += 0 if self._kstest(l, s, c, samps) else 1 self.assertLess(fails, trials * 0.01) def _kstest(self, loc, scale, conc, samples): # Uses the Kolmogorov-Smirnov test for goodness of fit. ks, _ = sp_stats.kstest(samples, sp_stats.genpareto(conc, loc=loc, scale=scale).cdf) # Return True when the test passes. return ks < 0.02 def testPdfOfSampleMultiDims(self): dist = tfd.GeneralizedPareto( loc=0, scale=[[2.], [3.]], concentration=[-.37, .11]) num = 50000 samples = dist.sample(num, seed=test_util.test_seed()) pdfs = dist.prob(samples) sample_vals, pdf_vals = self.evaluate([samples, pdfs]) self.assertEqual((num, 2, 2), samples.shape) self.assertEqual((num, 2, 2), pdfs.shape) self._assertIntegral(sample_vals[:, 0, 0], pdf_vals[:, 0, 0], err=0.02) self._assertIntegral(sample_vals[:, 0, 1], pdf_vals[:, 0, 1], err=0.02) self._assertIntegral(sample_vals[:, 1, 0], pdf_vals[:, 1, 0], err=0.02) self._assertIntegral(sample_vals[:, 1, 1], pdf_vals[:, 1, 1], err=0.02) def _assertIntegral(self, sample_vals, pdf_vals, err=1e-3): s_p = zip(sample_vals, pdf_vals) prev = (0, 0) total = 0 for k in sorted(s_p, key=lambda x: x[0]): pair_pdf = (k[1] + prev[1]) / 2 total += (k[0] - prev[0]) * pair_pdf prev = k self.assertNear(1., total, err=err) def testNonPositiveInitializationParamsRaises(self): scale = tf.constant(0.0, name='scale') with self.assertRaisesOpError('Argument `scale` must be positive.'): dist = tfd.GeneralizedPareto( loc=0, scale=scale, concentration=1, validate_args=True) self.evaluate(dist.mean()) def testGradientThroughConcentration(self): concentration = tf.Variable(3.) d = tfd.GeneralizedPareto(loc=0, scale=1, concentration=concentration) with tf.GradientTape() as tape: loss = -d.log_prob([1., 2., 4.]) grad = tape.gradient(loss, d.trainable_variables) self.assertLen(grad, 1) self.assertAllNotNone(grad) def testAssertsPositiveScale(self): scale = tf.Variable([1., 2., -3.]) self.evaluate(scale.initializer) with self.assertRaisesOpError('Argument `scale` must be positive.'): d = tfd.GeneralizedPareto( loc=0, scale=scale, concentration=1, validate_args=True) self.evaluate(d.sample()) def testAssertsPositiveScaleAfterMutation(self): scale = tf.Variable([1., 2., 3.]) self.evaluate(scale.initializer) d = tfd.GeneralizedPareto( loc=0, scale=scale, concentration=0.25, validate_args=True) self.evaluate(d.mean()) with self.assertRaisesOpError('Argument `scale` must be positive.'): with tf.control_dependencies([scale.assign([1., 2., -3.])]): self.evaluate(d.sample()) def testGradientThroughLocScale(self): loc = tf.Variable(1.) scale = tf.Variable(2.5) d = tfd.GeneralizedPareto(loc=loc, scale=scale, concentration=.15) with tf.GradientTape() as tape: loss = -d.log_prob([1., 2., 4.]) grads = tape.gradient(loss, d.trainable_variables) self.assertLen(grads, 2) self.assertAllNotNone(grads) if __name__ == '__main__': tf.test.main()
[ [ [ 751, 766 ] ], [ [ 790, 798 ] ], [ [ 822, 836 ] ], [ [ 845, 848 ], [ 5246, 5249 ] ], [ [ 878, 894 ], [ 2142, 2144 ], [ 2540, 2542 ], [ 3595, 3597 ], [ 4059, 4061 ], [ 4597, 4599 ], [ 5314, 5316 ] ], [ [ 902, 930 ], [ 1315, 1318 ], [ 1861, 1864 ] ], [ [ 938, 949 ], [ 3141, 3143 ], [ 3358, 3360 ], [ 3402, 3404 ], [ 3541, 3543 ], [ 4330, 4332 ], [ 4944, 4946 ], [ 5774, 5776 ], [ 5803, 5805 ], [ 5830, 5832 ], [ 6850, 6852 ], [ 6903, 6905 ], [ 6952, 6954 ] ], [ [ 968, 985 ], [ 2958, 2966 ], [ 3931, 3939 ], [ 4446, 4454 ], [ 4884, 4892 ], [ 5599, 5607 ], [ 6227, 6235 ], [ 6362, 6370 ], [ 7740, 7748 ], [ 7793, 7801 ] ], [ [ 994, 1020 ], [ 10712, 10714 ], [ 2428, 2430 ], [ 6510, 6512 ], [ 6539, 6541 ], [ 6567, 6569 ], [ 8986, 8988 ], [ 9293, 9295 ], [ 9393, 9395 ], [ 9624, 9626 ], [ 9957, 9959 ], [ 10231, 10233 ], [ 10373, 10375 ], [ 10401, 10403 ], [ 10498, 10500 ], [ 1570, 1572 ] ], [ [ 1028, 1057 ], [ 1208, 1211 ], [ 6599, 6602 ] ], [ [ 1110, 1139 ], [ 2177, 2184 ], [ 2589, 2596 ], [ 3644, 3651 ], [ 4108, 4115 ], [ 4646, 4653 ], [ 5363, 5370 ], [ 1427, 1434 ], [ 1478, 1485 ], [ 1511, 1518 ], [ 1645, 1652 ] ], [ [ 1191, 1200 ], [ 2117, 2126 ], [ 2049, 2058 ], [ 5971, 5980 ], [ 7102, 7111 ], [ 8093, 8102 ] ], [ [ 1202, 1205 ], [ 1820, 1823 ], [ 3425, 3428 ], [ 5873, 5876 ], [ 6990, 6993 ], [ 7956, 7959 ], [ 9103, 9106 ], [ 9317, 9320 ], [ 9771, 9774 ], [ 10028, 10031 ], [ 10426, 10429 ], [ 6650, 6653 ] ], [ [ 1333, 1352 ], [ 2151, 2170 ], [ 2549, 2568 ], [ 3604, 3623 ], [ 4068, 4087 ], [ 4606, 4625 ], [ 5323, 5342 ] ], [ [ 2095, 2116 ] ] ]
#
# vect3dotfun.py
# Dot product of two 3-d vectors using function in Python 3.7
#
# Sparisoma Viridi | https://github.com/dudung
#
# 20210110
# 2001 Start creating this example.
# 2002 Test it and ok.
#

# Define dot function with two arguments
def dot(a, b):
    p = a[0] * b[0] + a[1] * b[1] + a[2] * b[2]
    return p

# Define two vector using array
r1 = [1, 2, 3]
r2 = [2, 2, 9]

# Calculate dot product of two vectors
p = dot(r1, r2)

# Display result
print("r1 = ", r1, sep="");
print("r2 = ", r2, sep="");
print("p = r1 \xb7 r2 = ", p, sep="");
[ [ [ 253, 256 ], [ 426, 429 ] ], [ [ 352, 354 ], [ 430, 432 ], [ 471, 473 ] ], [ [ 367, 369 ], [ 434, 436 ], [ 499, 501 ] ], [ [ 422, 423 ], [ 539, 540 ] ] ]
from typing import Any, Dict, Union, Optional
from dataclasses import asdict, dataclass

Headers = Optional[Dict[str, Union[str, bool, int]]]


@dataclass
class APIGatewayProxyResult:
    """
    Key names are expected and given by AWS APIGateway specifications and must not be changed
    """

    statusCode: int
    body: Union[str, Dict[str, Any]]
    headers: Headers = None
    multiValueHeaders: Headers = None
    isBase64Encoded: Optional[bool] = None

    def asdict(self):
        return {k: v for k, v in asdict(self).items() if v is not None}
[ [ [ 19, 22 ], [ 345, 348 ] ], [ [ 24, 28 ], [ 108, 112 ], [ 335, 339 ] ], [ [ 30, 35 ], [ 118, 123 ], [ 324, 329 ] ], [ [ 37, 45 ], [ 99, 107 ], [ 438, 446 ] ], [ [ 70, 76 ], [ 516, 522 ] ], [ [ 78, 87 ], [ 145, 154 ] ], [ [ 89, 96 ], [ 364, 371 ], [ 402, 409 ] ], [ [ 161, 182 ] ] ]
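# Illustrative usage sketch for the APIGatewayProxyResult dataclass above;
# the status code and body shown here are hypothetical values.
result = APIGatewayProxyResult(statusCode=200, body='{"ok": true}')
# asdict() drops every field that is still None (headers, multiValueHeaders, isBase64Encoded)
print(result.asdict())  # -> {'statusCode': 200, 'body': '{"ok": true}'}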
class Status:
    OK = "OK"
    ERROR = "ERROR"


class Response(dict):
    def __init__(self, status, data):
        super().__init__()
        self["status"] = status
        self["data"] = data
[ [ [ 6, 12 ] ], [ [ 56, 64 ] ] ]
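# Illustrative usage sketch for the Status/Response helpers above;
# the payload dict is hypothetical.
resp = Response(Status.OK, {"user_id": 42})
print(resp)  # -> {'status': 'OK', 'data': {'user_id': 42}}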
# Copyright (c) 2022 Andreas Törnkvist | MIT License
import math

class worldfile:
    def __init__(self, filename):
        wFile = open(filename)
        w = wFile.readlines()
        w = [line.rstrip() for line in w]

        self.A = float(w[0])
        self.D = float(w[1])
        self.B = float(w[2])
        self.E = float(w[3])
        self.C = float(w[4])
        self.F = float(w[5])

        Xv = math.atan(self.D/self.A)
        Yv = math.atan(self.B/self.E)

        self.Xx = (math.cos(Xv) ** 2) / self.A
        self.Xy = (math.cos(Xv) * math.sin(Xv)) / self.A
        self.Yy = (math.cos(Yv) ** 2) / self.E
        self.Yx = (math.cos(Yv) * math.sin(Yv)) / self.E

    def coordToPx(self, lon, lat):
        Dx = lon - self.C
        Dy = lat - self.F

        Px = (Dx * self.Xx) + (Dy * self.Yx)
        Py = (Dx * self.Xy) + (Dy * self.Yy)

        return(Px, Py)
[ [ [ 61, 65 ], [ 410, 414 ], [ 448, 452 ], [ 493, 497 ], [ 540, 544 ], [ 555, 559 ], [ 597, 601 ], [ 644, 648 ], [ 659, 663 ] ], [ [ 73, 82 ] ] ]
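# Illustrative usage sketch for the worldfile class above. The filename is
# hypothetical; the file is expected to hold the six world-file parameters
# (A, D, B, E, C, F), one value per line, as read by __init__ above.
wf = worldfile("map.tfw")
px, py = wf.coordToPx(18.07, 59.33)  # longitude, latitude of a sample point
print(px, py)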
# Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. # pylint: disable=invalid-name, unused-argument """A parser for Relay's text format.""" from __future__ import absolute_import import sys from ast import literal_eval from collections import deque import tvm from . import module from .base import Span, SourceName from . import expr from . import ty from . import op PYTHON_VERSION = sys.version_info.major try: from .grammar.py3.RelayVisitor import RelayVisitor from .grammar.py3.RelayParser import RelayParser from .grammar.py3.RelayLexer import RelayLexer except ImportError: raise Exception("Couldn't find ANTLR parser. Try building with USE_ANTLR=ON.") try: from antlr4 import InputStream, CommonTokenStream from antlr4.error.ErrorListener import ErrorListener except ImportError: raise Exception("Couldn't find ANTLR runtime." + "Try running `pip{version} install antlr4-python{version}-runtime`." .format(version=PYTHON_VERSION)) sys.setrecursionlimit(10000) class ParseError(Exception): """Exception type for parse errors.""" def __init__(self, message): # type: (str) -> None super(ParseError, self).__init__() self.message = message def __repr__(self): return "ParseError({})".format(self.message) def __str__(self): return repr(self) class OpWrapper: """Overload the __call__ for op.""" pass class ExprOp(OpWrapper): """Call an expr. The default, but does not handle attrs well.""" def __init__(self, operator): self.operator = operator def __call__(self, args, attrs, type_args): try: return expr.Call(self.operator, args, attrs, type_args) except Exception: raise Exception("Operator {} is not registered. It's attributes are {}" .format(self.operator, attrs)) class FuncOp(OpWrapper): """Convert the attrs, call the python function with the attrs passed in as keyword arguments. Tvm should provide this in the future, as this is pretty similar to what op.get is providing. 
""" def __init__(self, operator): self.operator = operator def convert(self, v): if isinstance(v, tuple): return tuple([self.convert(x) for x in v]) if isinstance(v, expr.Constant): return v.data.asnumpy().item() if isinstance(v, str): return v raise Exception(v) def __call__(self, args, attrs, type_args): if attrs is None: attrs = {} x = self.operator(*args, **{k: self.convert(v) for k, v in attrs.items()}) if isinstance(x, expr.TupleWrapper): x = x.astuple() return x BINARY_OPS = { RelayParser.MUL: op.multiply, RelayParser.DIV: op.divide, RelayParser.ADD: op.add, RelayParser.SUB: op.subtract, RelayParser.LT: op.less, RelayParser.GT: op.greater, RelayParser.LE: op.less_equal, RelayParser.GE: op.greater_equal, RelayParser.EQ: op.equal, RelayParser.NE: op.not_equal, } FUNC_OPS = { "nn.conv2d": op.nn.conv2d, "nn.batch_norm": op.nn.batch_norm, "nn.dense": op.nn.dense, "nn.bias_add": op.nn.bias_add, "nn.max_pool2d": op.nn.max_pool2d, "nn.global_max_pool2d": op.nn.global_max_pool2d, "nn.avg_pool2d": op.nn.avg_pool2d, "nn.global_avg_pool2d": op.nn.global_avg_pool2d, "nn.softmax": op.nn.softmax, "reshape": op.reshape, "nn.conv2d_transpose": op.nn.conv2d_transpose, "concatenate": op.concatenate, "nn.dropout": op.nn.dropout_raw, "zeros": op.zeros, "split": op.split, "cast": op.cast } TYPE_PREFIXES = [ "int", "uint", "float", "bool", ] T = ty.TypeVar("T") # Scope = Deque[Tuple[str, T]] # Scopes = Deque[Scope[T]] def lookup(scopes, name): # type: (Scopes[T], str) -> Optional[T] """Look up `name` in `scopes`.""" for scope in scopes: for key, val in scope: if key == name: return val return None def spanify(f): """A decorator which attaches span information to the value returned by calling `f`. Intended for use with the below AST visiting methods. The idea is that after we do the work of constructing the AST we attach Span information. """ def _wrapper(*args, **kwargs): # Assumes 0th arg is self and gets source_name from object. sn = args[0].source_name # Assumes 1st arg is an ANTLR parser context. ctx = args[1] ast = f(*args, **kwargs) line, col = ctx.getSourceInterval() sp = Span(sn, line, col) if isinstance(ast, tvm.relay.expr.TupleWrapper): ast = ast.astuple() ast.set_span(sp) return ast return _wrapper # TODO(@jmp): Use https://stackoverflow.com/q/13889941 # to figure out how to get ANTLR4 to be more unhappy about syntax errors class ParseTreeToRelayIR(RelayVisitor): """Parse Relay text format into Relay IR.""" def __init__(self, source_name): # type: (str) -> None self.source_name = source_name self.module = module.Module({}) # type: module.Module # Adding an empty scope allows naked lets without pain. 
self.var_scopes = deque([deque()]) # type: Scopes[expr.Var] self.global_var_scope = deque() # type: Scope[expr.GlobalVar] self.type_param_scopes = deque([deque()]) # type: Scopes[ty.TypeVar] self.graph_expr = [] # type: List[expr.Expr] super(ParseTreeToRelayIR, self).__init__() def enter_var_scope(self): # type: () -> None """Enter a new Var scope so it can be popped off later.""" self.var_scopes.appendleft(deque()) def exit_var_scope(self): # type: () -> Scope[expr.Var] """Pop off the current Var scope and return it.""" return self.var_scopes.popleft() def mk_var(self, name, type_): # type: (str, ty.Type) -> expr.Var """Create a new Var and add it to the Var scope.""" var = expr.Var(name, type_) self.var_scopes[0].appendleft((name, var)) return var def mk_global_var(self, name): # type: (str) -> expr.GlobalVar """Create a new GlobalVar and add it to the GlobalVar scope.""" var = expr.GlobalVar(name) self.global_var_scope.append((name, var)) return var def enter_type_param_scope(self): # type: () -> None """Enter a new TypeVar scope so it can be popped off later.""" self.type_param_scopes.appendleft(deque()) def exit_type_param_scope(self): # type: () -> Scope[ty.TypeVar] """Pop off the current TypeVar scope and return it.""" return self.type_param_scopes.popleft() def mk_typ(self, name, kind): # (str, ty.Kind) -> ty.TypeVar """Create a new TypeVar and add it to the TypeVar scope.""" typ = ty.TypeVar(name, kind) self.type_param_scopes[0].appendleft((name, typ)) return typ def visitProjection(self, ctx): return expr.TupleGetItem(self.visit(ctx.expr()), self.visit(ctx.NAT())) def visitTerminal(self, node): # type: (TerminalNode) -> Union[expr.Expr, int, float] """Visit lexer tokens that aren't ignored or visited by other functions.""" node_type = node.getSymbol().type node_text = node.getText() name = node_text[1:] # variables if node_type == RelayLexer.GLOBAL_VAR: return lookup(deque([self.global_var_scope]), node_text[1:]) if node_type == RelayLexer.LOCAL_VAR: # Remove the leading '%' and lookup the name. 
var = lookup(self.var_scopes, name) if var is None: raise ParseError("Couldn't resolve `{}`.".format(name)) return var if node_type == RelayLexer.GRAPH_VAR: try: return self.graph_expr[int(name)] except IndexError: raise ParseError("Couldn't resolve `{}`".format(name)) # data types if node_type == RelayLexer.NAT: return int(node_text) if node_type == RelayLexer.FLOAT: return float(node_text[:-1]) if node_type == RelayLexer.BOOL_LIT: if node_text == "True": return True if node_text == "False": return False raise ParseError("Unrecognized BOOL_LIT: `{}`".format(node_text)) if node_type == RelayLexer.QUOTED_STRING: return literal_eval(node_text) raise ParseError("todo: `{}`".format(node_text)) def visit_list(self, ctx_list): # type: (List[ParserRuleContext]) -> List[Any] """"Visit a list of contexts.""" assert isinstance(ctx_list, list) return [self.visit(ctx) for ctx in ctx_list] def getType_(self, ctx): # type: (Optional[RelayParser.Type_Context]) -> Optional[ty.Type] """Return a (possibly None) Relay type.""" if ctx is None: return None return self.visit(ctx) def visitProg(self, ctx): self.meta = None if ctx.METADATA(): header, data = str(ctx.METADATA()).split('\n', 1) assert header == "METADATA:" self.meta = tvm.load_json(data) # type: (RelayParser.ProgContext) -> Union[expr.Expr, module.Module] if ctx.defn(): self.visit_list(ctx.defn()) return self.module if ctx.expr(): return self.visit(ctx.expr()) return self.module # Exprs def visitOpIdent(self, ctx): # type: (RelayParser.OpIdentContext) -> op.Op op_name = ctx.CNAME().getText() if op_name in FUNC_OPS: return FuncOp(FUNC_OPS[op_name]) return ExprOp(op.get(op_name)) # pass through def visitParen(self, ctx): # type: (RelayParser.ParenContext) -> expr.Expr return self.visit(ctx.expr()) # pass through def visitBody(self, ctx): # type: (RelayParser.BodyContext) -> expr.Expr return self.visit(ctx.expr()) def visitScalarFloat(self, ctx): # type: (RelayParser.ScalarFloatContext) -> expr.Constant return expr.const(self.visit(ctx.FLOAT())) def visitScalarInt(self, ctx): # type: (RelayParser.ScalarIntContext) -> expr.Constant return expr.const(self.visit(ctx.NAT())) def visitScalarBool(self, ctx): # type: (RelayParser.ScalarBoolContext) -> expr.Constant return expr.const(self.visit(ctx.BOOL_LIT())) def visitNeg(self, ctx): # type: (RelayParser.NegContext) -> Union[expr.Constant, expr.Call] val = self.visit(ctx.expr()) if isinstance(val, expr.Constant) and val.data.asnumpy().ndim == 0: # fold Neg in for scalars return expr.const(-val.data.asnumpy().item()) return op.negative(val) def visitTuple(self, ctx): # type: (RelayParser.TupleContext) -> expr.Tuple tup = self.visit_list(ctx.expr()) return expr.Tuple(tup) def visitLet(self, ctx): # type: (RelayParser.SeqContext) -> expr.Let """Desugar various sequence constructs to Relay Let nodes.""" if ctx.var() is None: # anonymous identity ident = "_" type_ = None var = self.mk_var(ident, type_) else: var = self.visitVar(ctx.var()) self.enter_var_scope() value = self.visit(ctx.expr(0)) self.exit_var_scope() body = self.visit(ctx.expr(1)) return expr.Let(var, value, body) def visitBinOp(self, ctx): # type: (RelayParser.BinOpContext) -> expr.Call """Desugar binary operators.""" arg0, arg1 = self.visit_list(ctx.expr()) relay_op = BINARY_OPS.get(ctx.op.type) if relay_op is None: raise ParseError("Unimplemented binary op.") return relay_op(arg0, arg1) @spanify def visitVar(self, ctx): # type: (RelayParser.VarContext) -> expr.Var """Visit a single variable.""" ident = ctx.LOCAL_VAR() if ident is None: raise 
ParseError("Only local ids may be used in vars.") type_ = self.getType_(ctx.type_()) return self.mk_var(ident.getText()[1:], type_) def visitVarList(self, ctx): # type: (RelayParser.VarListContext) -> List[expr.Var] return self.visit_list(ctx.var()) # TODO: support a larger class of values than just Relay exprs def visitAttr(self, ctx): # type: (RelayParser.AttrContext) -> Tuple[str, expr.Expr] return (ctx.CNAME().getText(), self.visit(ctx.expr())) def visitArgNoAttr(self, ctx): return (self.visit_list(ctx.varList().var()), None) def visitAttrSeq(self, ctx): # type: (RelayParser.AttrListContext) -> Dict[str, expr.Expr] return dict(self.visit_list(ctx.attr())) def visitArgWithAttr(self, ctx): return (self.visit_list(ctx.var()), self.visitAttrSeq(ctx.attrSeq())) def visitArgList(self, ctx # type: RelayParser.ArgListContext ): # type: (...) -> Tuple[Optional[List[expr.Var]], Optional[Dict[str, expr.Expr]]] var_list = self.visit(ctx.varList()) if ctx.varList() else None attr_list = self.visit(ctx.attrList()) if ctx.attrList() else None return (var_list, attr_list) def visitMeta(self, ctx): type_key = str(ctx.CNAME()) index = int(self.visit(ctx.NAT())) return self.meta[type_key][index] def mk_func(self, ctx): # type: (Union[RelayParser.FuncContext, RelayParser.DefnContext]) -> expr.Function """Construct a function from either a Func or Defn.""" # Enter var scope early to put params in scope. self.enter_var_scope() # Capture type params in params. self.enter_type_param_scope() type_params = ctx.typeParamList() if type_params is not None: type_params = type_params.ident() assert type_params for ty_param in type_params: name = ty_param.getText() self.mk_typ(name, ty.Kind.Type) var_list, attr_list = self.visit(ctx.argList()) if var_list is None: var_list = [] ret_type = self.getType_(ctx.type_()) body = self.visit(ctx.body()) # NB(@jroesch): you must stay in the type parameter scope until # after you exit the body, you can reference the type parameters # of your parent scopes. type_params = list(self.exit_type_param_scope()) if type_params: _, type_params = zip(*type_params) self.exit_var_scope() attrs = tvm.make.node("DictAttrs", **attr_list) if attr_list is not None else None return expr.Function(var_list, body, ret_type, type_params, attrs) @spanify def visitFunc(self, ctx): # type: (RelayParser.FuncContext) -> expr.Function return self.mk_func(ctx) # TODO: how to set spans for definitions? # @spanify def visitDefn(self, ctx): # type: (RelayParser.DefnContext) -> None ident = ctx.ident().GLOBAL_VAR() if ident is None: raise ParseError("Only global ids may be used in `def`s.") ident_name = ident.getText()[1:] ident = self.mk_global_var(ident_name) self.module[ident] = self.mk_func(ctx) def visitCallNoAttr(self, ctx): return (self.visit_list(ctx.exprList().expr()), None) def visitCallWithAttr(self, ctx): return (self.visit_list(ctx.expr()), self.visit(ctx.attrSeq())) def call(self, func, args, attrs, type_args): if isinstance(func, OpWrapper): return func(args, attrs, type_args) return expr.Call(func, args, attrs, type_args) @spanify def visitCall(self, ctx): # type: (RelayParser.CallContext) -> expr.Call func = self.visit(ctx.expr()) args, attrs = self.visit(ctx.callList()) return self.call(func, args, attrs, []) @spanify def visitIfElse(self, ctx): # type: (RelayParser.IfElseContext) -> expr.If """Construct a Relay If node. 
Creates a new scope for each branch.""" cond = self.visit(ctx.expr()) self.enter_var_scope() true_branch = self.visit(ctx.body(0)) self.exit_var_scope() self.enter_var_scope() false_branch = self.visit(ctx.body(1)) self.exit_var_scope() return expr.If(cond, true_branch, false_branch) @spanify def visitGraph(self, ctx): # type: (RelayParser.GraphContext) -> expr.Expr """Visit a graph variable assignment.""" graph_nid = int(ctx.GRAPH_VAR().getText()[1:]) self.enter_var_scope() value = self.visit(ctx.expr(0)) self.exit_var_scope() if graph_nid != len(self.graph_expr): raise ParseError( "Expected new graph variable to be `%{}`,".format(len(self.graph_expr)) + \ "but got `%{}`".format(graph_nid)) self.graph_expr.append(value) kont = self.visit(ctx.expr(1)) return kont # Types # pylint: disable=unused-argument def visitIncompleteType(self, ctx): # type (RelayParser.IncompleteTypeContext) -> None: return None def visitTypeIdent(self, ctx): # type: (RelayParser.TypeIdentContext) -> Union[ty.TensorType, str] ''' Handle type identifier. ''' type_ident = ctx.CNAME().getText() # Look through all type prefixes for a match for type_prefix in TYPE_PREFIXES: if type_ident.startswith(type_prefix): return ty.scalar_type(type_ident) type_param = lookup(self.type_param_scopes, type_ident) if type_param is not None: return type_param raise ParseError("Unknown builtin type: {}".format(type_ident)) # def visitCallType(self, ctx): # # type: (RelayParser.CallTypeContext) -> Union[expr.Expr, ty.TensorType] # ident_type = ctx.identType().CNAME().getText() # args = self.visit_list(ctx.type_()) # if not args: # raise ParseError("Type-level functions must have arguments!") # func_type = TYPE_FUNCS.get(ident_type)(args) # if func_type is None: # raise ParseError("Unknown type-level function: `{}`".format(ident_type)) # else: # return func_type def visitParensShape(self, ctx): # type: (RelayParser.ParensShapeContext) -> int return self.visit(ctx.shape()) def visitShapeList(self, ctx): # type: (RelayParser.ShapeListContext) -> List[int] return self.visit_list(ctx.shape()) def visitTensor(self, ctx): return tuple(self.visit_list(ctx.expr())) def visitTensorType(self, ctx): # type: (RelayParser.TensorTypeContext) -> ty.TensorType """Create a simple tensor type. 
No generics.""" shape = self.visit(ctx.shapeList()) dtype = self.visit(ctx.type_()) if not isinstance(dtype, ty.TensorType): raise ParseError("Expected dtype to be a Relay base type.") dtype = dtype.dtype return ty.TensorType(shape, dtype) def visitTupleType(self, ctx): # type: (RelayParser.TupleTypeContext) -> ty.TupleType return ty.TupleType(self.visit_list(ctx.type_())) def visitFuncType(self, ctx): # type: (RelayParser.FuncTypeContext) -> ty.FuncType types = self.visit_list(ctx.type_()) arg_types = types[:-1] ret_type = types[-1] return ty.FuncType(arg_types, ret_type, [], None) def make_parser(data): # type: (str) -> RelayParser """Construct a RelayParser a given data stream.""" input_stream = InputStream(data) lexer = RelayLexer(input_stream) lexer.addErrorListener(StrictErrorListener(data)) token_stream = CommonTokenStream(lexer) p = RelayParser(token_stream) p.addErrorListener(StrictErrorListener(data)) return p __source_name_counter__ = 0 class StrictErrorListener(ErrorListener): """This ErrorListener fail eagerly on all error, and report the program.""" def __init__(self, text): self.text = text def syntaxError(self, recognizer, offendingSymbol, line, column, msg, e): raise Exception("Syntax Error in:\n" + self.text) def reportAmbiguity(self, recognizer, dfa, startIndex, stopIndex, exact, ambigAlts, configs): raise Exception("Ambiguity Error in:\n" + self.text) def reportAttemptingFullContext(self, recognizer, dfa, startIndex, stopIndex, conflictingAlts, configs): raise Exception("Attempting Full Context in:\n" + self.text) def reportContextSensitivity(self, recognizer, dfa, startIndex, stopIndex, prediction, configs): raise Exception("Context Sensitivity in:\n" + self.text) def fromtext(data, source_name=None): # type: (str, str) -> Union[expr.Expr, module.Module] """Parse a Relay program.""" if data == "": raise ParseError("Cannot parse the empty string.") global __source_name_counter__ if source_name is None: source_name = "source_file{0}".format(__source_name_counter__) if isinstance(source_name, str): source_name = SourceName(source_name) tree = make_parser(data).prog() return ParseTreeToRelayIR(source_name).visit(tree)
[ [ [ 897, 912 ] ], [ [ 921, 924 ], [ 1125, 1128 ], [ 1748, 1751 ] ], [ [ 941, 953 ], [ 9408, 9420 ] ], [ [ 979, 984 ], [ 6052, 6057 ], [ 6059, 6064 ], [ 6135, 6140 ], [ 6218, 6223 ], [ 6225, 6230 ], [ 6555, 6560 ], [ 7413, 7418 ], [ 8370, 8375 ] ], [ [ 993, 996 ], [ 10165, 10168 ], [ 15684, 15687 ], [ 5446, 5449 ] ], [ [ 1012, 1018 ], [ 5919, 5925 ] ], [ [ 1037, 1041 ], [ 5399, 5403 ] ], [ [ 1043, 1053 ], [ 22532, 22542 ] ], [ [ 1068, 1072 ], [ 2426, 2430 ], [ 3081, 3085 ], [ 3425, 3429 ], [ 6888, 6892 ], [ 7143, 7147 ], [ 7921, 7925 ], [ 11113, 11117 ], [ 11264, 11268 ], [ 11415, 11419 ], [ 11624, 11628 ], [ 11730, 11734 ], [ 11948, 11952 ], [ 12489, 12493 ], [ 15774, 15778 ], [ 16749, 16753 ], [ 17473, 17477 ] ], [ [ 1087, 1089 ], [ 4498, 4500 ], [ 7769, 7771 ], [ 15120, 15122 ], [ 18692, 18694 ], [ 20102, 20104 ], [ 20235, 20237 ], [ 20377, 20379 ], [ 20638, 20640 ] ], [ [ 1104, 1106 ], [ 3527, 3529 ], [ 3561, 3563 ], [ 3593, 3595 ], [ 3622, 3624 ], [ 3656, 3658 ], [ 3686, 3688 ], [ 3719, 3721 ], [ 3755, 3757 ], [ 3794, 3796 ], [ 3825, 3827 ], [ 3872, 3874 ], [ 3907, 3909 ], [ 3941, 3943 ], [ 3973, 3975 ], [ 4010, 4012 ], [ 4056, 4058 ], [ 4102, 4104 ], [ 4148, 4150 ], [ 4191, 4193 ], [ 4221, 4223 ], [ 4260, 4262 ], [ 4303, 4305 ], [ 4337, 4339 ], [ 4369, 4371 ], [ 4392, 4394 ], [ 4414, 4416 ], [ 10689, 10691 ], [ 11785, 11787 ] ], [ [ 1108, 1122 ], [ 1730, 1744 ] ], [ [ 1195, 1207 ], [ 5726, 5738 ] ], [ [ 1249, 1260 ], [ 3510, 3521 ], [ 3544, 3555 ], [ 3576, 3587 ], [ 3605, 3616 ], [ 3639, 3650 ], [ 3669, 3680 ], [ 3702, 3713 ], [ 3738, 3749 ], [ 3777, 3788 ], [ 3808, 3819 ], [ 20973, 20984 ] ], [ [ 1301, 1311 ], [ 8321, 8331 ], [ 8441, 8451 ], [ 8716, 8726 ], [ 8953, 8963 ], [ 9027, 9037 ], [ 9110, 9120 ], [ 9363, 9373 ], [ 20842, 20852 ] ], [ [ 1444, 1455 ], [ 20812, 20823 ] ], [ [ 1457, 1474 ], [ 20940, 20957 ] ], [ [ 1518, 1531 ], [ 21118, 21131 ] ], [ [ 1784, 1794 ], [ 1928, 1938 ], [ 8619, 8629 ], [ 8858, 8868 ], [ 9279, 9289 ], [ 9447, 9457 ], [ 12788, 12798 ], [ 13076, 13086 ], [ 16197, 16207 ], [ 17886, 17896 ], [ 18864, 18874 ], [ 20136, 20146 ], [ 22291, 22301 ] ], [ [ 2123, 2132 ], [ 2197, 2206 ], [ 2658, 2667 ], [ 16674, 16683 ] ], [ [ 2190, 2196 ], [ 10682, 10688 ] ], [ [ 2651, 2657 ], [ 10641, 10647 ] ], [ [ 3491, 3501 ], [ 12712, 12722 ] ], [ [ 3842, 3850 ], [ 10612, 10620 ], [ 10648, 10656 ] ], [ [ 4425, 4438 ], [ 18603, 18616 ] ], [ [ 4494, 4495 ] ], [ [ 4577, 4583 ], [ 8363, 8369 ], [ 8539, 8545 ], [ 18741, 18747 ] ], [ [ 4814, 4821 ], [ 12870, 12877 ], [ 15840, 15847 ], [ 16795, 16802 ], [ 17029, 17036 ], [ 17520, 17527 ] ], [ [ 5707, 5725 ], [ 6355, 6373 ], [ 22604, 22622 ] ], [ [ 20686, 20697 ], [ 22568, 22579 ] ], [ [ 21063, 21086 ], [ 22447, 22470 ] ], [ [ 21098, 21117 ], [ 20894, 20913 ], [ 21022, 21041 ] ], [ [ 22133, 22141 ] ] ]
# Copyright (C) 2015 Stefan C. Mueller import functools from twisted.internet import defer def on_error_close(logger): """ Decorator for callback methods that implement `IProtocol`. Any uncaught exception is logged and the connection is closed forcefully. Usage:: import logger logger = logging.getLogger(__name__) class MyProtocol(Protocol): @on_error_close(logger.error) def connectionMade(): ... The argument passed to `on_error_close` will be invoked with a string message. The motivation behind this decorator is as follows: Due to bugs it sometimes happens that exceptions are thrown out out callback methods in protocols. Twisted ignores them, at best they are logged. This is always a bug, as errors should be handled in the callback and not let to continue up the call stack. As such, the behaviour after this occured is typically not well defined and unpredictable. A well made protocol implementation can handle unexpected connection losses as they may occur at any time in a real world environment. By closing the connection, there is a certain chance that we enter a code path that can recover, or at least gracefully cleanup. In my experience, this often means that unit-tests fail with a more useful error message. Without it, I sometimes get the case that a unit-test (or even the final application) just blocks forever with no information on what is going wrong. """ def make_wrapper(func): @functools.wraps(func) def wrapper(self, *args, **kwargs): d = defer.maybeDeferred(func, self, *args, **kwargs) def on_error(err): logger("Unhandled failure in %r:%s" % (func, err. getTraceback())) if hasattr(self, "transport"): if hasattr(self.transport, "abortConnection"): self.transport.abortConnection() elif hasattr(self.transport, "loseConnection"): self.transport.loseConnection() d.addErrback(on_error) return wrapper return make_wrapper
[ [ [ 46, 55 ], [ 1646, 1655 ] ], [ [ 85, 90 ], [ 1741, 1746 ] ], [ [ 98, 112 ] ] ]
# Copyright (c) 2006,2007,2008 Mitch Garnaat http://garnaat.org/ # # Permission is hereby granted, free of charge, to any person obtaining a # copy of this software and associated documentation files (the # "Software"), to deal in the Software without restriction, including # without limitation the rights to use, copy, modify, merge, publish, dis- # tribute, sublicense, and/or sell copies of the Software, and to permit # persons to whom the Software is furnished to do so, subject to the fol- # lowing conditions: # # The above copyright notice and this permission notice shall be included # in all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS # OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- # ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT # SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, # WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS # IN THE SOFTWARE. class Key(object): @classmethod def from_path(cls, *args, **kwds): raise NotImplementedError("Paths are not currently supported") def __init__(self, encoded=None, obj=None): self.name = None if obj: self.id = obj.id self.kind = obj.kind() else: self.id = None self.kind = None def app(self): raise NotImplementedError("Applications are not currently supported") def kind(self): return self.kind def id(self): return self.id def name(self): raise NotImplementedError("Key Names are not currently supported") def id_or_name(self): return self.id def has_id_or_name(self): return self.id is not None def parent(self): raise NotImplementedError("Key parents are not currently supported") def __str__(self): return self.id_or_name()
[ [ [ 1115, 1118 ] ] ]
# Copyright 2013 Lars Butler & individual contributors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import tokenize try: import StringIO except ImportError: import io StringIO = io INVALID_WKT_FMT = 'Invalid WKT: `%s`' def dump(obj, dest_file): """ Dump GeoJSON-like `dict` to WKT and write it to the `dest_file`. :param dict obj: A GeoJSON-like dictionary. It must at least the keys 'type' and 'coordinates'. :param dest_file: Open and writable file-like object. """ dest_file.write(dumps(obj)) def load(source_file): """ Load a GeoJSON `dict` object from a ``source_file`` containing WKT. :param source_file: Open and readable file-like object. :returns: A GeoJSON `dict` representing the geometry read from the file. """ return loads(source_file.read()) def dumps(obj, decimals=16): """ Dump a GeoJSON-like `dict` to a WKT string. """ geom_type = obj['type'] exporter = _dumps_registry.get(geom_type) if exporter is None: _unsupported_geom_type(geom_type) fmt = '%%.%df' % decimals return exporter(obj, fmt) def loads(string): """ Construct a GeoJSON `dict` from WKT (`string`). """ sio = StringIO.StringIO(string) # NOTE: This is not the intended purpose of `tokenize`, but it works. tokens = (x[1] for x in tokenize.generate_tokens(sio.readline)) tokens = _tokenize_wkt(tokens) geom_type = next(tokens) importer = _loads_registry.get(geom_type) if importer is None: _unsupported_geom_type(geom_type) return importer(tokens, string) def _tokenize_wkt(tokens): """ Since the tokenizer treats "-" and numeric strings as separate values, combine them and yield them as a single token. This utility encapsulates parsing of negative numeric values from WKT can be used generically in all parsers. """ negative = False for t in tokens: if t == '-': negative = True continue else: if negative: yield '-%s' % t else: yield t negative = False def _unsupported_geom_type(geom_type): raise ValueError("Unsupported geometry type '%s'" % geom_type) def _dump_point(obj, fmt): """ Dump a GeoJSON-like Point object to WKT. :param dict obj: A GeoJSON-like `dict` representing a Point. :param str fmt: Format string which indicates the number of digits to display after the decimal point when formatting coordinates. :returns: WKT representation of the input GeoJSON Point ``obj``. """ coords = obj['coordinates'] pt = 'POINT (%s)' % ' '.join(fmt % c for c in coords) return pt def _dump_linestring(obj, fmt): """ Dump a GeoJSON-like LineString object to WKT. Input parameters and return value are the LINESTRING equivalent to :func:`_dump_point`. """ coords = obj['coordinates'] ls = 'LINESTRING (%s)' ls %= ', '.join(' '.join(fmt % c for c in pt) for pt in coords) return ls def _dump_polygon(obj, fmt): """ Dump a GeoJSON-like Polygon object to WKT. Input parameters and return value are the POLYGON equivalent to :func:`_dump_point`. 
""" coords = obj['coordinates'] poly = 'POLYGON (%s)' rings = (', '.join(' '.join(fmt % c for c in pt) for pt in ring) for ring in coords) rings = ('(%s)' % r for r in rings) poly %= ', '.join(rings) return poly def _dump_multipoint(obj, fmt): """ Dump a GeoJSON-like MultiPoint object to WKT. Input parameters and return value are the MULTIPOINT equivalent to :func:`_dump_point`. """ coords = obj['coordinates'] mp = 'MULTIPOINT (%s)' points = (' '.join(fmt % c for c in pt) for pt in coords) # Add parens around each point. points = ('(%s)' % pt for pt in points) mp %= ', '.join(points) return mp def _dump_multilinestring(obj, fmt): """ Dump a GeoJSON-like MultiLineString object to WKT. Input parameters and return value are the MULTILINESTRING equivalent to :func:`_dump_point`. """ coords = obj['coordinates'] mlls = 'MULTILINESTRING (%s)' linestrs = ('(%s)' % ', '.join(' '.join(fmt % c for c in pt) for pt in linestr) for linestr in coords) mlls %= ', '.join(ls for ls in linestrs) return mlls def _dump_multipolygon(obj, fmt): """ Dump a GeoJSON-like MultiPolygon object to WKT. Input parameters and return value are the MULTIPOLYGON equivalent to :func:`_dump_point`. """ coords = obj['coordinates'] mp = 'MULTIPOLYGON (%s)' polys = ( # join the polygons in the multipolygon ', '.join( # join the rings in a polygon, # and wrap in parens '(%s)' % ', '.join( # join the points in a ring, # and wrap in parens '(%s)' % ', '.join( # join coordinate values of a vertex ' '.join(fmt % c for c in pt) for pt in ring) for ring in poly) for poly in coords) ) mp %= polys return mp def _dump_geometrycollection(obj, fmt): """ Dump a GeoJSON-like GeometryCollection object to WKT. Input parameters and return value are the GEOMETRYCOLLECTION equivalent to :func:`_dump_point`. The WKT conversions for each geometry in the collection are delegated to their respective functions. """ gc = 'GEOMETRYCOLLECTION (%s)' geoms = obj['geometries'] geoms_wkt = [] for geom in geoms: geom_type = geom['type'] geoms_wkt.append(_dumps_registry.get(geom_type)(geom, fmt)) gc %= ','.join(geoms_wkt) return gc def _load_point(tokens, string): """ :param tokens: A generator of string tokens for the input WKT, begining just after the geometry type. The geometry type is consumed before we get to here. For example, if :func:`loads` is called with the input 'POINT(0.0 1.0)', ``tokens`` would generate the following values: .. code-block:: python ['(', '0.0', '1.0', ')'] :param str string: The original WKT string. :returns: A GeoJSON `dict` Point representation of the WKT ``string``. """ if not next(tokens) == '(': raise ValueError(INVALID_WKT_FMT % string) coords = [] try: for t in tokens: if t == ')': break else: coords.append(float(t)) except tokenize.TokenError: raise ValueError(INVALID_WKT_FMT % string) return dict(type='Point', coordinates=coords) def _load_linestring(tokens, string): """ Has similar inputs and return value to to :func:`_load_point`, except is for handling LINESTRING geometry. :returns: A GeoJSON `dict` LineString representation of the WKT ``string``. 
""" if not next(tokens) == '(': raise ValueError(INVALID_WKT_FMT % string) # a list of lists # each member list represents a point coords = [] try: pt = [] for t in tokens: if t == ')': coords.append(pt) break elif t == ',': # it's the end of the point coords.append(pt) pt = [] else: pt.append(float(t)) except tokenize.TokenError: raise ValueError(INVALID_WKT_FMT % string) return dict(type='LineString', coordinates=coords) def _load_polygon(tokens, string): """ Has similar inputs and return value to to :func:`_load_point`, except is for handling POLYGON geometry. :returns: A GeoJSON `dict` Polygon representation of the WKT ``string``. """ open_parens = next(tokens), next(tokens) if not open_parens == ('(', '('): raise ValueError(INVALID_WKT_FMT % string) # coords contains a list of rings # each ring contains a list of points # each point is a list of 2-4 values coords = [] ring = [] on_ring = True try: pt = [] for t in tokens: if t == ')' and on_ring: # The ring is finished ring.append(pt) coords.append(ring) on_ring = False elif t == ')' and not on_ring: # it's the end of the polygon break elif t == '(': # it's a new ring ring = [] pt = [] on_ring = True elif t == ',' and on_ring: # it's the end of a point ring.append(pt) pt = [] elif t == ',' and not on_ring: # there's another ring. # do nothing pass else: pt.append(float(t)) except tokenize.TokenError: raise ValueError(INVALID_WKT_FMT % string) return dict(type='Polygon', coordinates=coords) def _load_multipoint(tokens, string): """ Has similar inputs and return value to to :func:`_load_point`, except is for handling MULTIPOINT geometry. :returns: A GeoJSON `dict` MultiPoint representation of the WKT ``string``. """ open_paren = next(tokens) if not open_paren == '(': raise ValueError(INVALID_WKT_FMT % string) coords = [] pt = [] paren_depth = 1 try: for t in tokens: if t == '(': paren_depth += 1 elif t == ')': paren_depth -= 1 if paren_depth == 0: break elif t == '': pass elif t == ',': # the point is done coords.append(pt) pt = [] else: pt.append(float(t)) except tokenize.TokenError: raise ValueError(INVALID_WKT_FMT % string) # Given the way we're parsing, we'll probably have to deal with the last # point after the loop if len(pt) > 0: coords.append(pt) return dict(type='MultiPoint', coordinates=coords) def _load_multipolygon(tokens, string): """ Has similar inputs and return value to to :func:`_load_point`, except is for handling MULTIPOLYGON geometry. :returns: A GeoJSON `dict` MultiPolygon representation of the WKT ``string``. """ open_paren = next(tokens) if not open_paren == '(': raise ValueError(INVALID_WKT_FMT % string) polygons = [] while True: try: poly = _load_polygon(tokens, string) polygons.append(poly['coordinates']) t = next(tokens) if t == ')': # we're done; no more polygons. break except StopIteration: # If we reach this, the WKT is not valid. raise ValueError(INVALID_WKT_FMT % string) return dict(type='MultiPolygon', coordinates=polygons) def _load_multilinestring(tokens, string): """ Has similar inputs and return value to to :func:`_load_point`, except is for handling MULTILINESTRING geometry. :returns: A GeoJSON `dict` MultiLineString representation of the WKT ``string``. """ open_paren = next(tokens) if not open_paren == '(': raise ValueError(INVALID_WKT_FMT % string) linestrs = [] while True: try: linestr = _load_linestring(tokens, string) linestrs.append(linestr['coordinates']) t = next(tokens) if t == ')': # we're done; no more linestrings. break except StopIteration: # If we reach this, the WKT is not valid. 
raise ValueError(INVALID_WKT_FMT % string) return dict(type='MultiLineString', coordinates=linestrs) def _load_geometrycollection(tokens, string): """ Has similar inputs and return value to to :func:`_load_point`, except is for handling GEOMETRYCOLLECTIONs. Delegates parsing to the parsers for the individual geometry types. :returns: A GeoJSON `dict` GeometryCollection representation of the WKT ``string``. """ open_paren = next(tokens) if not open_paren == '(': raise ValueError(INVALID_WKT_FMT % string) geoms = [] result = dict(type='GeometryCollection', geometries=geoms) while True: try: t = next(tokens) if t == ')': break elif t == ',': # another geometry still continue else: geom_type = t load_func = _loads_registry.get(geom_type) geom = load_func(tokens, string) geoms.append(geom) except StopIteration: raise ValueError(INVALID_WKT_FMT % string) return result _dumps_registry = { 'Point': _dump_point, 'LineString': _dump_linestring, 'Polygon': _dump_polygon, 'MultiPoint': _dump_multipoint, 'MultiLineString': _dump_multilinestring, 'MultiPolygon': _dump_multipolygon, 'GeometryCollection': _dump_geometrycollection, } _loads_registry = { 'POINT': _load_point, 'LINESTRING': _load_linestring, 'POLYGON': _load_polygon, 'MULTIPOINT': _load_multipoint, 'MULTILINESTRING': _load_multilinestring, 'MULTIPOLYGON': _load_multipolygon, 'GEOMETRYCOLLECTION': _load_geometrycollection, }
[ [ [ 618, 626 ], [ 1903, 1911 ], [ 7201, 7209 ], [ 8073, 8081 ], [ 9572, 9580 ], [ 10566, 10574 ] ], [ [ 644, 652 ], [ 1775, 1783 ] ], [ [ 684, 686 ], [ 702, 704 ] ], [ [ 691, 699 ], [ 1775, 1783 ] ], [ [ 707, 722 ], [ 7008, 7023 ], [ 7247, 7262 ], [ 7641, 7656 ], [ 8119, 8134 ], [ 8560, 8575 ], [ 9618, 9633 ], [ 10042, 10057 ], [ 10612, 10627 ], [ 11196, 11211 ], [ 11605, 11620 ], [ 12051, 12066 ], [ 12472, 12487 ], [ 13003, 13018 ], [ 13556, 13571 ] ], [ [ 751, 755 ] ], [ [ 1079, 1083 ] ], [ [ 1384, 1389 ], [ 1061, 1066 ] ], [ [ 1682, 1687 ], [ 1352, 1357 ] ], [ [ 2164, 2177 ], [ 1956, 1969 ] ], [ [ 2707, 2729 ], [ 1581, 1603 ], [ 2088, 2110 ] ], [ [ 2815, 2826 ], [ 13636, 13647 ] ], [ [ 3312, 3328 ], [ 13667, 13683 ] ], [ [ 3650, 3663 ], [ 13700, 13713 ] ], [ [ 4083, 4099 ], [ 13733, 13749 ] ], [ [ 4523, 4544 ], [ 13774, 13795 ] ], [ [ 4985, 5003 ], [ 13817, 13835 ] ], [ [ 5802, 5826 ], [ 13863, 13887 ] ], [ [ 6385, 6396 ], [ 13926, 13937 ] ], [ [ 7330, 7346 ], [ 13957, 13973 ], [ 12147, 12163 ] ], [ [ 8207, 8220 ], [ 13990, 14003 ], [ 11289, 11302 ] ], [ [ 9703, 9719 ], [ 14023, 14039 ] ], [ [ 10851, 10869 ], [ 14107, 14125 ] ], [ [ 11697, 11718 ], [ 14064, 14085 ] ], [ [ 12567, 12591 ], [ 14153, 14177 ] ], [ [ 13602, 13617 ], [ 1516, 1531 ], [ 6292, 6307 ] ], [ [ 13893, 13908 ], [ 2023, 2038 ], [ 13382, 13397 ] ] ]
import binascii import re import socket from abc import ABCMeta from hashlib import md5 from ipaddress import ip_network, _BaseNetwork from typing import Iterable, Optional, Tuple, Generator, Dict, Iterator from django.conf import settings from django.utils.translation import ugettext_lazy as _ from djing.lib.decorators import LazyInitMetaclass from gw_app.nas_managers import core from gw_app.nas_managers import structs as i_structs DEBUG = getattr(settings, 'DEBUG', False) LIST_USERS_ALLOWED = 'DjingUsersAllowed' LIST_DEVICES_ALLOWED = 'DjingDevicesAllowed' class ApiRos(object): """Routeros api""" __sk = None is_login = False def __init__(self, ip: str, port: int): if self.__sk is None: sk = socket.socket(socket.AF_INET, socket.SOCK_STREAM) sk.connect((ip, port or 8728)) self.__sk = sk def login(self, username, pwd): if self.is_login: return chal = None for repl, attrs in self.talk_iter(("/login",)): chal = binascii.unhexlify(attrs['=ret']) md = md5() md.update(b'\x00') md.update(bytes(pwd, 'utf-8')) md.update(chal) for _ in self.talk_iter(("/login", "=name=" + username, "=response=00" + binascii.hexlify( md.digest()).decode('utf-8'))): pass self.is_login = True def talk_iter(self, words: Iterable): if self.write_sentence(words) == 0: return while 1: i = self.read_sentence() if len(i) == 0: continue reply = i[0] attrs = {} for w in i[1:]: j = w.find('=', 1) if j == -1: attrs[w] = '' else: attrs[w[:j]] = w[j + 1:] yield (reply, attrs) if reply == '!done': return def write_sentence(self, words: Iterable): ret = 0 for w in words: self.write_word(w) ret += 1 self.write_word('') return ret def read_sentence(self): r = [] while 1: w = self.read_word() if w == '': return r r.append(w) def write_word(self, w): if DEBUG: print("<<< " + w) b = bytes(w, "utf-8") self.write_len(len(b)) self.write_bytes(b) def read_word(self): ret = self.read_bytes(self.read_len()).decode('utf-8') if DEBUG: print(">>> " + ret) return ret def write_len(self, l): if l < 0x80: self.write_bytes(bytes((l,))) elif l < 0x4000: l |= 0x8000 self.write_bytes(bytes(((l >> 8) & 0xff, l & 0xff))) elif l < 0x200000: l |= 0xC00000 self.write_bytes( bytes(((l >> 16) & 0xff, (l >> 8) & 0xff, l & 0xff))) elif l < 0x10000000: l |= 0xE0000000 self.write_bytes(bytes(((l >> 24) & 0xff, (l >> 16) & 0xff, (l >> 8) & 0xff, l & 0xff))) else: self.write_bytes(bytes((0xf0, (l >> 24) & 0xff, (l >> 16) & 0xff, (l >> 8) & 0xff, l & 0xff))) def read_len(self): c = self.read_bytes(1)[0] if (c & 0x80) == 0x00: pass elif (c & 0xC0) == 0x80: c &= ~0xC0 c <<= 8 c += self.read_bytes(1)[0] elif (c & 0xE0) == 0xC0: c &= ~0xE0 c <<= 8 c += self.read_bytes(1)[0] c <<= 8 c += self.read_bytes(1)[0] elif (c & 0xF0) == 0xE0: c &= ~0xF0 c <<= 8 c += self.read_bytes(1)[0] c <<= 8 c += self.read_bytes(1)[0] c <<= 8 c += self.read_bytes(1)[0] elif (c & 0xF8) == 0xF0: c = self.read_bytes(1)[0] c <<= 8 c += self.read_bytes(1)[0] c <<= 8 c += self.read_bytes(1)[0] c <<= 8 c += self.read_bytes(1)[0] return c def write_bytes(self, s): n = 0 while n < len(s): r = self.__sk.send(s[n:]) if r == 0: raise core.NasFailedResult("connection closed by remote end") n += r def read_bytes(self, length): ret = b'' while len(ret) < length: s = self.__sk.recv(length - len(ret)) if len(s) == 0: raise core.NasFailedResult("connection closed by remote end") ret += s return ret def __del__(self): if self.__sk is not None: self.__sk.close() class MikrotikTransmitter(core.BaseTransmitter, ApiRos, metaclass=type('_ABC_Lazy_mcs', (ABCMeta, LazyInitMetaclass), {})): description = _('Mikrotik 
NAS') def __init__(self, login: str, password: str, ip: str, port: int, enabled: bool, *args, **kwargs): if not enabled: raise core.NasFailedResult(_('Gateway disabled')) try: core.BaseTransmitter.__init__( self, login=login, password=password, ip=ip, port=port, *args, **kwargs ) ApiRos.__init__(self, ip, port) self.login(username=login, pwd=password) except ConnectionRefusedError: raise core.NasNetworkError('Connection to %s is Refused' % ip) def _exec_cmd(self, cmd: Iterable) -> Dict: if not isinstance(cmd, (list, tuple)): raise TypeError r = dict() for k, v in self.talk_iter(cmd): if k == '!done': break elif k == '!trap': raise core.NasFailedResult(v.get('=message')) r[k] = v or None return r def _exec_cmd_iter(self, cmd: Iterable) -> Generator: if not isinstance(cmd, (list, tuple)): raise TypeError for k, v in self.talk_iter(cmd): if k == '!done': break elif k == '!trap': raise core.NasFailedResult(v.get('=message')) if v: yield v @staticmethod def _build_shape_obj(info: Dict) -> i_structs.SubnetQueue: # Переводим приставку скорости Mikrotik в Mbit/s def parse_speed(text_speed): text_speed_digit = float(text_speed[:-1] or 0.0) text_append = text_speed[-1:] if text_append == 'M': res = text_speed_digit elif text_append == 'k': res = text_speed_digit / 1000 # elif text_append == 'G': # res = text_speed_digit * 0x400 else: res = float(re.sub(r'[a-zA-Z]', '', text_speed)) / 1000 ** 2 return res speed_out, speed_in = info['=max-limit'].split('/') speed_in = parse_speed(speed_in) speed_out = parse_speed(speed_out) try: target = info.get('=target') if target is None: target = info.get('=target-addresses') name = info.get('=name') disabled = info.get('=disabled', False) if disabled is not None: disabled = True if disabled == 'true' else False if target and name: # target may be '192.168.0.3/32,192.168.0.2/32' net = target.split(',')[0] if not net: return a = i_structs.SubnetQueue( name=name, network=net, max_limit=(speed_in, speed_out), is_access=not disabled, queue_id=info.get('=.id') ) return a except ValueError as e: print('ValueError:', e) ################################################# # QUEUES ################################################# # Find queue by name def find_queue(self, name: str) -> Optional[i_structs.SubnetQueue]: r = self._exec_cmd(('/queue/simple/print', '?name=%s' % name)) if r: return self._build_shape_obj(r.get('!re')) def add_queue(self, queue: i_structs.SubnetQueue) -> None: if not isinstance(queue, i_structs.SubnetQueue): raise TypeError('queue must be instance of SubnetQueue') return self._exec_cmd(( '/queue/simple/add', '=name=%s' % queue.name, # FIXME: тут в разных микротиках или =target-addresses или =target '=target=%s' % queue.network, '=max-limit=%.3fM/%.3fM' % queue.max_limit, '=queue=Djing_pcq_up/Djing_pcq_down', '=burst-time=1/5', #'=total-queue=Djing_pcq_down' )) def remove_queue(self, queue: i_structs.SubnetQueue) -> None: if not isinstance(queue, i_structs.SubnetQueue): raise TypeError if not queue.queue_id: queue = self.find_queue(queue.name) if queue is not None: if queue.queue_id: self._exec_cmd(( '/queue/simple/remove', '=.id=%s' % queue.queue_id )) def remove_queue_range(self, q_ids: Iterable[str]): ids = ','.join(q_ids) if len(ids) > 1: self._exec_cmd(('/queue/simple/remove', '=numbers=%s' % ids)) def update_queue(self, queue: i_structs.SubnetQueue): if not isinstance(queue, i_structs.SubnetQueue): raise TypeError queue_gw = self.find_queue(queue.name) if queue_gw is None: return self.add_queue(queue) else: cmd = [ '/queue/simple/set', 
'=name=%s' % queue.name, '=max-limit=%.3fM/%.3fM' % queue.max_limit, # FIXME: тут в разных версиях прошивки микротика # или =target-addresses или =target '=target=%s' % queue.network, '=queue=Djing_pcq_up/Djing_pcq_down', '=burst-time=1/1' ] if queue.queue_id: cmd.insert(1, '=.id=%s' % queue.queue_id) r = self._exec_cmd(cmd) return r def read_queue_iter(self) -> Generator: for dat in self._exec_cmd_iter(('/queue/simple/print', '=detail')): sobj = self._build_shape_obj(dat) if sobj is not None: yield sobj ################################################# # Ip->firewall->address list ################################################# def add_ip(self, list_name: str, net): if not issubclass(net.__class__, _BaseNetwork): raise TypeError commands = ( '/ip/firewall/address-list/add', '=list=%s' % list_name, '=address=%s' % net ) return self._exec_cmd(commands) def remove_ip(self, mk_id): return self._exec_cmd(( '/ip/firewall/address-list/remove', '=.id=%s' % mk_id )) def remove_ip_range(self, ip_firewall_ids: Iterable[str]): return self._exec_cmd(( '/ip/firewall/address-list/remove', '=numbers=%s' % ','.join(ip_firewall_ids) )) def find_ip(self, net, list_name: str): if not issubclass(net.__class__, _BaseNetwork): raise TypeError if net.prefixlen == net.max_prefixlen: ip = net.network_address else: ip = net.with_prefixlen r = self._exec_cmd(( '/ip/firewall/address-list/print', 'where', '?list=%s' % list_name, '?address=%s' % ip )) return r.get('!re') def read_nets_iter(self, list_name: str) -> Generator: nets = self._exec_cmd_iter(( '/ip/firewall/address-list/print', 'where', '?list=%s' % list_name, '?dynamic=no' )) for dat in nets: n = ip_network(dat.get('=address')) n.queue_id = dat.get('=.id') yield n def update_ip(self, net): if not issubclass(net.__class__, _BaseNetwork): raise TypeError res_net_gw = self.find_ip(net, LIST_USERS_ALLOWED) if not res_net_gw: self.add_ip(LIST_USERS_ALLOWED, net) ################################################# # BaseTransmitter implementation ################################################# def add_user_range(self, queue_list: i_structs.VectorQueue): for q in queue_list: self.add_user(q) def remove_user_range(self, queues: i_structs.VectorQueue): if not isinstance(queues, (tuple, list, set)): raise ValueError('*users* is used twice, generator does not fit') queue_ids = (q.queue_id for q in queues if q) self.remove_queue_range(queue_ids) for q in queues: if isinstance(q, i_structs.SubnetQueue): ip_list_entity = self.find_ip(q.network, LIST_USERS_ALLOWED) if ip_list_entity: self.remove_ip(ip_list_entity.get('=.id')) def add_user(self, queue: i_structs.SubnetQueue, *args): try: self.add_queue(queue) except core.NasFailedResult as e: print('Error:', e) net = queue.network if not issubclass(net.__class__, _BaseNetwork): raise TypeError try: self.add_ip(LIST_USERS_ALLOWED, net) except core.NasFailedResult as e: print('Error:', e) def remove_user(self, queue: i_structs.SubnetQueue): self.remove_queue(queue) r = self.find_ip(queue.network, LIST_USERS_ALLOWED) if r: ip_id = r.get('=.id') self.remove_ip(ip_id) def update_user(self, queue: i_structs.SubnetQueue, *args): if queue.is_access: self.update_queue(queue) self.update_ip(queue.network) else: self.remove_queue(queue) res_ips = self.find_ip(queue.network, LIST_USERS_ALLOWED) if res_ips: self.remove_ip(res_ips.get('=.id')) def ping(self, host, count=10) -> Optional[Tuple[int, int]]: r = self._exec_cmd(( '/ip/arp/print', '?address=%s' % host )) if r == {}: return interface = r['!re'].get('=interface') r = 
self._exec_cmd(( '/ping', '=address=%s' % host, '=arp-ping=yes', '=interval=100ms', '=count=%d' % count, '=interface=%s' % interface )) res = r.get('!re') if res is not None: received, sent = int(res.get('=received')), int(res.get('=sent')) return received, sent def read_users(self) -> i_structs.VectorQueue: return self.read_queue_iter() def sync_nas(self, users_from_db: Iterator): queues_from_db = ( ab.build_agent_struct() for ab in users_from_db if ab is not None and ab.is_access() ) queues_from_db = set(filter(lambda x: x is not None, queues_from_db)) queues_from_gw = self.read_queue_iter() user_q_for_add, user_q_for_del = core.diff_set(queues_from_db, set(queues_from_gw)) self.remove_queue_range( (q.queue_id for q in user_q_for_del) ) for q in user_q_for_add: self.add_queue(q) del user_q_for_add, user_q_for_del # sync ip addrs list db_nets = set(net.network for net in queues_from_db) gw_nets = set(self.read_nets_iter(LIST_USERS_ALLOWED)) nets_add, nets_del = core.diff_set(db_nets, gw_nets) self.remove_ip_range( (q.queue_id for q in nets_del) ) for q in nets_add: self.add_ip(LIST_USERS_ALLOWED, q)
[ [ [ 7, 15 ], [ 1041, 1049 ], [ 1298, 1306 ] ], [ [ 23, 25 ], [ 7013, 7015 ] ], [ [ 33, 39 ], [ 744, 750 ], [ 758, 764 ], [ 774, 780 ] ], [ [ 56, 63 ], [ 5014, 5021 ] ], [ [ 84, 87 ], [ 1088, 1091 ] ], [ [ 110, 120 ], [ 12337, 12347 ] ], [ [ 122, 134 ], [ 11026, 11038 ], [ 11702, 11714 ], [ 12502, 12514 ], [ 13757, 13769 ] ], [ [ 154, 162 ], [ 1463, 1471 ], [ 2010, 2018 ], [ 5739, 5747 ], [ 6118, 6126 ], [ 9575, 9583 ], [ 11455, 11463 ] ], [ [ 164, 172 ], [ 8322, 8330 ], [ 14576, 14584 ] ], [ [ 174, 179 ], [ 14585, 14590 ] ], [ [ 181, 190 ], [ 6131, 6140 ], [ 10598, 10607 ], [ 12119, 12128 ] ], [ [ 192, 196 ], [ 5752, 5756 ], [ 6494, 6498 ] ], [ [ 198, 206 ], [ 15279, 15287 ] ], [ [ 232, 240 ], [ 455, 463 ] ], [ [ 278, 296 ], [ 5067, 5068 ], [ 5269, 5270 ] ], [ [ 330, 347 ], [ 5023, 5040 ] ], [ [ 380, 384 ], [ 4884, 4888 ], [ 4411, 4415 ], [ 4672, 4676 ], [ 5248, 5252 ], [ 5317, 5321 ], [ 5652, 5656 ], [ 5997, 6001 ], [ 6362, 6366 ], [ 13630, 13634 ], [ 13877, 13881 ], [ 15604, 15608 ], [ 16092, 16096 ] ], [ [ 417, 437 ], [ 6503, 6512 ], [ 7775, 7784 ], [ 8331, 8340 ], [ 8527, 8536 ], [ 8592, 8601 ], [ 9134, 9143 ], [ 9199, 9208 ], [ 9755, 9764 ], [ 9812, 9821 ], [ 12876, 12885 ], [ 12999, 13008 ], [ 13307, 13316 ], [ 13537, 13546 ], [ 13969, 13978 ], [ 14202, 14211 ], [ 15179, 15188 ] ], [ [ 439, 444 ], [ 2369, 2374 ], [ 2595, 2600 ] ], [ [ 482, 500 ], [ 12584, 12602 ], [ 12655, 12673 ], [ 13388, 13406 ], [ 13837, 13855 ], [ 14066, 14084 ], [ 14441, 14459 ], [ 16042, 16060 ], [ 16258, 16276 ] ], [ [ 523, 543 ] ], [ [ 576, 582 ], [ 4906, 4912 ], [ 5510, 5516 ] ], [ [ 4864, 4883 ] ] ]
# coding: utf-8 """ Fabric task for deploying project on servers(production, staging, development) """ import os import sys from contextlib import contextmanager from fabric.contrib import django from fabric.api import local, run, lcd, cd from fabric.tasks import Task from fab_settings import env sys.path.append(os.path.dirname(__file__) + '/../../mysite/') django.settings_module('mysite.settings') STAGING_BRANCH = 'master' BASE_PATH = os.path.dirname(__file__) STAGING_HOST = 'staging.courselets.org' def debug(*args, **kwargs): output = "" for x in args: print(x) output += str(x) return output @contextmanager def debug_cd(path): print("run on path:{0}".format(path)) yield class Deploying(Task): """ Deploy project on Production """ func = local func_cd = lcd code_branch = STAGING_BRANCH @property def project_path(self): return os.path.join(BASE_PATH, 'socraticqs2') @property def local_settings_path(self): return os.path.join(self.project_path, '../settings') def __virtualenv(self): with self.func_cd(os.path.join(self.project_path, '../')): self.func('source {}/bin/activate'.format(env.venv_name)) def update_requirements(self): with self.func_cd(self.project_path): self.func("sudo pip install -r requirements.txt") def _get_settings(self, branch='master'): with self.func_cd(self.local_settings_path): self.func('git pull origin {0}'.format(branch)) self.func('cp production_conf.py ../socraticqs2/mysite/mysite/settings/production_conf.py') def __restart_service(self): self.func('sudo supervisorctl restart gunicorn') self.func('sudo supervisorctl restart celery') self.func('sudo service nginx restart') @property def __is_new_branch(self): if self.func == run: return self.code_branch in self.func('git branch') else: return self.code_branch in self.func('git branch', capture=True) def __update(self): if self.__is_new_branch: self.func('git checkout {0} --force'.format(self.code_branch)) self.func('git pull origin {0} --force'.format(self.code_branch)) else: self.func('git fetch origin') self.func('git checkout -b {0} origin/{0}'.format(self.code_branch)) self._get_settings() self.func('find . 
-name "*.pyc" -print -delete') self.__virtualenv() self.update_requirements() with self.func_cd("mysite"): self.func('python manage.py collectstatic --noinput') self.func('python manage.py syncdb --noinput') self.func('python manage.py fsm_deploy --noinput') self.__restart_service() def run(self, running='local', branch='master', suffix=None): self.code_branch = branch if running == 'local': self.func = local self.func_cd = lcd self.__update() elif running == 'remote': self.func = run self.func_cd = cd env.hosts = [STAGING_HOST, ] global BASE_PATH BASE_PATH = env.project_root with self.func_cd(self.project_path): self.__update() elif running == 'debug': print("DEBUG:\n") self.func = debug self.func_cd = debug_cd self.__update() class Staging(Deploying): """Deploy on Staging""" def _get_settings(self, branch='master'): """On dev/staging we don't use production settings""" with self.func_cd(self.local_settings_path): self.func('git pull origin {0} --force'.format(branch)) self.func('cp local_conf.py ../dev/socraticqs2/mysite/mysite/settings/local_conf.py') class Development(Staging): """Deploy on Development server Args: running - deploy code local or in server(local/run) branch - git branch name Example: fab deploy.dev:running='local', branch='dev' """ @property def project_path(self): if self.func == local: return os.path.join(BASE_PATH, '../../../../dev') else: return os.path.join(BASE_PATH, 'dev/socraticqs2') @property def local_settings_path(self): if self.func == local: return os.path.join(self.project_path, '../settings') else: return os.path.join(self.project_path, '../../settings') code_branch = 'dev' prod = Deploying() staging = Staging() dev = Development()
[ [ [ 113, 115 ], [ 320, 322 ], [ 448, 450 ], [ 930, 932 ], [ 1034, 1036 ], [ 1136, 1138 ], [ 4218, 4220 ], [ 4294, 4296 ], [ 4437, 4439 ], [ 4517, 4519 ] ], [ [ 123, 126 ], [ 304, 307 ] ], [ [ 150, 164 ], [ 642, 656 ] ], [ [ 193, 199 ], [ 367, 373 ] ], [ [ 223, 228 ], [ 815, 820 ], [ 2991, 2996 ], [ 4192, 4197 ], [ 4411, 4416 ] ], [ [ 230, 233 ], [ 1919, 1922 ], [ 3114, 3117 ] ], [ [ 235, 238 ], [ 835, 838 ], [ 3024, 3027 ] ], [ [ 240, 242 ], [ 3145, 3147 ] ], [ [ 268, 272 ], [ 747, 751 ] ], [ [ 299, 302 ], [ 1231, 1234 ], [ 3160, 3163 ], [ 3242, 3245 ] ], [ [ 410, 424 ], [ 857, 871 ] ], [ [ 436, 445 ], [ 943, 952 ], [ 4231, 4240 ], [ 4307, 4316 ] ], [ [ 474, 486 ], [ 3173, 3185 ] ], [ [ 520, 525 ], [ 3428, 3433 ] ], [ [ 661, 669 ], [ 3461, 3469 ] ], [ [ 737, 746 ], [ 3514, 3523 ], [ 4601, 4610 ] ], [ [ 3506, 3513 ], [ 3902, 3909 ], [ 4623, 4630 ] ], [ [ 3890, 3901 ], [ 4639, 4650 ] ], [ [ 4594, 4598 ] ], [ [ 4613, 4620 ] ], [ [ 4633, 4636 ] ], [ [ 3230, 3239 ] ] ]
# coding: utf-8 """ Isilon SDK Isilon SDK - Language bindings for the OneFS API # noqa: E501 OpenAPI spec version: 5 Contact: sdk@isilon.com Generated by: https://github.com/swagger-api/swagger-codegen.git """ from __future__ import absolute_import import unittest import isi_sdk_8_1_0 from isi_sdk_8_1_0.models.license_license_tier_entitlements_exceeded_alert import LicenseLicenseTierEntitlementsExceededAlert # noqa: E501 from isi_sdk_8_1_0.rest import ApiException class TestLicenseLicenseTierEntitlementsExceededAlert(unittest.TestCase): """LicenseLicenseTierEntitlementsExceededAlert unit test stubs""" def setUp(self): pass def tearDown(self): pass def testLicenseLicenseTierEntitlementsExceededAlert(self): """Test LicenseLicenseTierEntitlementsExceededAlert""" # FIXME: construct object with mandatory attributes with example values # model = isi_sdk_8_1_0.models.license_license_tier_entitlements_exceeded_alert.LicenseLicenseTierEntitlementsExceededAlert() # noqa: E501 pass if __name__ == '__main__': unittest.main()
[ [ [ 259, 274 ] ], [ [ 283, 291 ], [ 554, 562 ], [ 1118, 1126 ] ], [ [ 300, 313 ] ], [ [ 396, 439 ] ], [ [ 485, 497 ] ], [ [ 506, 553 ] ] ]
# -*- coding: utf-8 -*- """ :copyright: Copyright 2020-2022 Sphinx Confluence Builder Contributors (AUTHORS) :license: BSD-2-Clause (LICENSE) """ from tests.lib.testcase import ConfluenceTestCase from tests.lib.testcase import setup_builder import os class TestConfluenceMetadata(ConfluenceTestCase): @classmethod def setUpClass(cls): super(TestConfluenceMetadata, cls).setUpClass() cls.dataset = os.path.join(cls.datasets, 'metadata') def test_confluence_metadata_directive_expected(self): with self.prepare(self.dataset) as app: app.build() builder_metadata = app.builder.metadata self.assertTrue(builder_metadata) self.assertTrue('index' in builder_metadata) doc_labels = builder_metadata['index'] self.assertTrue(doc_labels) self.assertTrue('labels' in doc_labels) labels = doc_labels['labels'] self.assertEqual(len(labels), 2) self.assertTrue('tag-a' in labels) self.assertTrue('tag-c' in labels) @setup_builder('html') def test_html_confluence_metadata_directive_ignore(self): with self.prepare(self.dataset, relax=True) as app: # build attempt should not throw an exception/error app.build()
[ [ [ 178, 196 ], [ 283, 301 ] ], [ [ 228, 241 ], [ 1084, 1097 ] ], [ [ 249, 251 ], [ 425, 427 ] ], [ [ 260, 282 ], [ 360, 382 ] ] ]
import asyncio
import websockets
import time
import threading

players = 0

class Player:
    def __init__(self, id, x = 0, y = 0, speed = 5):
        self.id = id
        self.x = x
        self.y = y
        self.dirX = 0
        self.dirY = 0
        self.speed = speed
        print("Player created successfully!")

    def setX(self, x):
        self.x = x
    def setY(self, y):
        self.y = y

    def getX(self):
        return self.x
    def getY(self):
        return self.y


async def hello(websocket, path):
    global players
    jogador = Player(players, 500, 500)
    async def moveUP():
        while 1:
            jogador.setY(jogador.getY()-jogador.speed)
            # send() is a coroutine, so it must be awaited (as in moveR below)
            await websocket.send("move:"+str(jogador.id)+":"+ str(jogador.getX())+":"+str(jogador.getY()))
            print("move:"+str(jogador.id)+":"+ str(jogador.getX())+":"+str(jogador.getY()))
            time.sleep(1)
    async def moveR():
        while 1:
            jogador.setX(jogador.getX()+jogador.speed)
            await websocket.send("move:"+str(jogador.id)+":"+ str(jogador.getX())+":"+str(jogador.getY()))
            print("move:"+str(jogador.id)+":"+ str(jogador.getX())+":"+str(jogador.getY()))
            time.sleep(1)
    def threadEvoque():
        global players
        loop = asyncio.new_event_loop()
        task = loop.create_task(moveUP())
        loop.run_until_complete(task)
        players += 1
        print(players)
    def threadEvoque2():
        global players
        loop = asyncio.new_event_loop()
        task2 = loop.create_task(moveR())
        loop.run_until_complete(task2)
        players += 1
        print(players)
    while 1:
        msg = await websocket.recv()
        print(msg)
        if(msg == "start"):
            players += 1
            await websocket.send("spawn:"+str(players)+":"+ str(jogador.getX())+":"+str(jogador.getY()))
            print("spawn:"+str(players)+":"+ str(jogador.getX())+":"+str(jogador.getY()))

start_server = websockets.serve(hello, "0.0.0.0", 8888)
print("Starting server...")
asyncio.get_event_loop().run_until_complete(start_server)
print("Server up and running!")
asyncio.get_event_loop().run_forever()
[ [ [ 7, 14 ], [ 2140, 2147 ], [ 2231, 2238 ], [ 1312, 1319 ], [ 1530, 1537 ] ], [ [ 22, 32 ], [ 2070, 2080 ] ], [ [ 40, 44 ], [ 910, 914 ], [ 1235, 1239 ] ], [ [ 52, 61 ] ], [ [ 63, 70 ], [ 585, 592 ], [ 1795, 1802 ] ], [ [ 84, 90 ], [ 578, 584 ] ], [ [ 511, 2001 ], [ 2087, 2092 ] ], [ [ 2055, 2067 ], [ 2184, 2196 ] ] ]
__author__ = 'pulphix' from app import TestApplication
[ [ [ 0, 10 ] ], [ [ 40, 55 ] ] ]
# coding: utf-8 from __future__ import unicode_literals from django.contrib import admin from .models import ThumbnailOption from django.contrib.admin.widgets import AdminFileWidget @admin.register(ThumbnailOption) class ThumbnailOptionAdmin(admin.ModelAdmin): fields = ['source', 'alias', 'options'] class ThumbnailOptionMixin(admin.ModelAdmin): class Media: pass def media(self): pass
[ [ [ 39, 55 ] ], [ [ 84, 89 ], [ 246, 251 ], [ 187, 192 ], [ 338, 343 ] ], [ [ 111, 126 ], [ 202, 217 ] ], [ [ 168, 183 ] ], [ [ 225, 245 ] ], [ [ 317, 337 ] ] ]
from database.adatabase import ADatabase import pandas as pd class SEC(ADatabase): def __init__(self): super().__init__("sec") def retrieve_num_data(self,adsh): try: db = self.client[self.name] table = db["nums"] data = table.find({"adsh":adsh},{"_id":0},show_record_id=False) return pd.DataFrame(list(data)) except Exception as e: print(str(e)) def retrieve_filing_data(self,cik): try: db = self.client[self.name] table = db["filings"] data = table.find({"cik":cik},{"_id":0},show_record_id=False) return pd.DataFrame(list(data)) except Exception as e: print(str(e)) def retrieve_adshs(self): try: db = self.client[self.name] table = db["filings"] data = table.find({},{"_id":0,"adsh":1},show_record_id=False) return pd.DataFrame(list(data)) except Exception as e: print(str(e))
[ [ [ 31, 40 ], [ 71, 80 ] ], [ [ 48, 60 ], [ 362, 364 ], [ 665, 667 ], [ 962, 964 ] ], [ [ 67, 70 ] ] ]
import game_framework from pico2d import * import title_state name = "StartState" image = None logo_time = 0.0 def enter(): global image image = load_image('kpu_credit.png') def exit(): global image del(image) def update(): global logo_time if (logo_time > 1.0): logo_time = 0.8 game_framework.change_state(title_state) delay(0.01) logo_time += 0.05 def draw(): global image clear_canvas() image.draw(400,300) update_canvas() def handle_events(): events = get_events() pass def pause(): pass def resume(): pass
[ [ [ 7, 21 ], [ 327, 341 ] ], [ [ 41, 42 ], [ 156, 166 ], [ 373, 378 ], [ 442, 454 ], [ 485, 498 ], [ 538, 548 ] ], [ [ 50, 61 ], [ 355, 366 ] ], [ [ 63, 67 ] ], [ [ 83, 88 ], [ 224, 229 ], [ 461, 466 ] ], [ [ 96, 105 ], [ 277, 286 ] ], [ [ 118, 123 ] ], [ [ 191, 195 ] ], [ [ 237, 243 ] ], [ [ 413, 417 ] ], [ [ 508, 521 ] ], [ [ 566, 571 ] ], [ [ 586, 592 ] ], [ [ 148, 153 ] ], [ [ 303, 312 ], [ 389, 398 ] ] ]
# -*- coding: utf-8 -*-
"""
Created on Thu Aug 13 20:30:46 2020

@author: Aaronga
"""
# Missing data
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd

dataset = pd.read_csv("Data.csv")
X = dataset.iloc[:, :-1].values
y = dataset.iloc[:, 3].values

# Handling the NaN values
from sklearn.preprocessing import Imputer
imputer = Imputer(missing_values="NaN", strategy="mean", axis = 0)
imputer = imputer.fit(X[:, 1:3])
X[:, 1:3] = imputer.transform(X[:, 1:3])
print(X)
[ [ [ 112, 123 ] ], [ [ 131, 155 ] ], [ [ 163, 175 ], [ 187, 189 ] ], [ [ 177, 184 ], [ 215, 222 ], [ 247, 254 ] ], [ [ 211, 212 ], [ 431, 432 ], [ 471, 472 ], [ 442, 443 ], [ 487, 488 ] ], [ [ 243, 244 ] ], [ [ 334, 341 ], [ 352, 359 ] ], [ [ 342, 349 ], [ 419, 426 ] ], [ [ 409, 416 ], [ 453, 460 ] ] ]
from sqlalchemy.testing import assert_raises, eq_ from sqlalchemy.testing import fixtures, AssertsCompiledSQL from sqlalchemy import ( testing, exc, case, select, literal_column, text, and_, Integer, cast, String, Column, Table, MetaData) from sqlalchemy.sql import table, column info_table = None class CaseTest(fixtures.TestBase, AssertsCompiledSQL): __dialect__ = 'default' @classmethod def setup_class(cls): metadata = MetaData(testing.db) global info_table info_table = Table( 'infos', metadata, Column('pk', Integer, primary_key=True), Column('info', String(30))) info_table.create() info_table.insert().execute( {'pk': 1, 'info': 'pk_1_data'}, {'pk': 2, 'info': 'pk_2_data'}, {'pk': 3, 'info': 'pk_3_data'}, {'pk': 4, 'info': 'pk_4_data'}, {'pk': 5, 'info': 'pk_5_data'}, {'pk': 6, 'info': 'pk_6_data'}) @classmethod def teardown_class(cls): info_table.drop() @testing.fails_on('firebird', 'FIXME: unknown') @testing.requires.subqueries def test_case(self): inner = select( [ case( [ [info_table.c.pk < 3, 'lessthan3'], [ and_(info_table.c.pk >= 3, info_table.c.pk < 7), 'gt3']]).label('x'), info_table.c.pk, info_table.c.info], from_obj=[info_table]) inner_result = inner.execute().fetchall() # Outputs: # lessthan3 1 pk_1_data # lessthan3 2 pk_2_data # gt3 3 pk_3_data # gt3 4 pk_4_data # gt3 5 pk_5_data # gt3 6 pk_6_data assert inner_result == [ ('lessthan3', 1, 'pk_1_data'), ('lessthan3', 2, 'pk_2_data'), ('gt3', 3, 'pk_3_data'), ('gt3', 4, 'pk_4_data'), ('gt3', 5, 'pk_5_data'), ('gt3', 6, 'pk_6_data') ] outer = select([inner.alias('q_inner')]) outer_result = outer.execute().fetchall() assert outer_result == [ ('lessthan3', 1, 'pk_1_data'), ('lessthan3', 2, 'pk_2_data'), ('gt3', 3, 'pk_3_data'), ('gt3', 4, 'pk_4_data'), ('gt3', 5, 'pk_5_data'), ('gt3', 6, 'pk_6_data') ] w_else = select( [ case( [ [info_table.c.pk < 3, cast(3, Integer)], [ and_( info_table.c.pk >= 3, info_table.c.pk < 6), 6]], else_=0).label('x'), info_table.c.pk, info_table.c.info], from_obj=[info_table]) else_result = w_else.execute().fetchall() assert else_result == [ (3, 1, 'pk_1_data'), (3, 2, 'pk_2_data'), (6, 3, 'pk_3_data'), (6, 4, 'pk_4_data'), (6, 5, 'pk_5_data'), (0, 6, 'pk_6_data') ] def test_literal_interpretation(self): t = table('test', column('col1')) assert_raises(exc.ArgumentError, case, [("x", "y")]) self.assert_compile( case([("x", "y")], value=t.c.col1), "CASE test.col1 WHEN :param_1 THEN :param_2 END") self.assert_compile( case([(t.c.col1 == 7, "y")], else_="z"), "CASE WHEN (test.col1 = :col1_1) THEN :param_1 ELSE :param_2 END") def test_text_doesnt_explode(self): for s in [ select( [ case( [ ( info_table.c.info == 'pk_4_data', text("'yes'"))], else_=text("'no'")) ]).order_by(info_table.c.info), select( [ case( [ ( info_table.c.info == 'pk_4_data', literal_column("'yes'"))], else_=literal_column("'no'") )] ).order_by(info_table.c.info), ]: if testing.against("firebird"): eq_(s.execute().fetchall(), [ ('no ', ), ('no ', ), ('no ', ), ('yes', ), ('no ', ), ('no ', ), ]) else: eq_(s.execute().fetchall(), [ ('no', ), ('no', ), ('no', ), ('yes', ), ('no', ), ('no', ), ]) @testing.fails_on('firebird', 'FIXME: unknown') def testcase_with_dict(self): query = select( [ case( { info_table.c.pk < 3: 'lessthan3', info_table.c.pk >= 3: 'gt3', }, else_='other'), info_table.c.pk, info_table.c.info ], from_obj=[info_table]) assert query.execute().fetchall() == [ ('lessthan3', 1, 'pk_1_data'), ('lessthan3', 2, 'pk_2_data'), ('gt3', 3, 'pk_3_data'), ('gt3', 4, 'pk_4_data'), ('gt3', 5, 'pk_5_data'), ('gt3', 6, 'pk_6_data') ] 
simple_query = select( [ case( {1: 'one', 2: 'two', }, value=info_table.c.pk, else_='other'), info_table.c.pk ], whereclause=info_table.c.pk < 4, from_obj=[info_table]) assert simple_query.execute().fetchall() == [ ('one', 1), ('two', 2), ('other', 3), ]
[ [ [ 31, 44 ], [ 3265, 3278 ] ], [ [ 46, 49 ], [ 4444, 4447 ], [ 4633, 4636 ] ], [ [ 81, 89 ], [ 324, 332 ] ], [ [ 91, 109 ], [ 343, 361 ] ], [ [ 139, 146 ], [ 1064, 1071 ], [ 1116, 1123 ], [ 4789, 4796 ], [ 464, 471 ], [ 4399, 4406 ] ], [ [ 148, 151 ], [ 3279, 3282 ] ], [ [ 153, 157 ], [ 1223, 1227 ], [ 2488, 2492 ], [ 3298, 3302 ], [ 3360, 3364 ], [ 3499, 3503 ], [ 3738, 3742 ], [ 4066, 4070 ], [ 4924, 4928 ], [ 5555, 5559 ] ], [ [ 159, 165 ], [ 1185, 1191 ], [ 2071, 2077 ], [ 2450, 2456 ], [ 3692, 3698 ], [ 4020, 4026 ], [ 4886, 4892 ], [ 5517, 5523 ] ], [ [ 167, 181 ], [ 4226, 4240 ], [ 4283, 4297 ] ], [ [ 183, 187 ], [ 3898, 3902 ], [ 3945, 3949 ] ], [ [ 189, 193 ], [ 1365, 1369 ], [ 2635, 2639 ] ], [ [ 195, 202 ], [ 586, 593 ], [ 2570, 2577 ] ], [ [ 204, 208 ], [ 2562, 2566 ] ], [ [ 214, 220 ], [ 641, 647 ] ], [ [ 222, 228 ], [ 573, 579 ], [ 626, 632 ] ], [ [ 230, 235 ], [ 523, 528 ] ], [ [ 237, 245 ], [ 455, 463 ] ], [ [ 274, 279 ], [ 3226, 3231 ] ], [ [ 281, 287 ], [ 3240, 3246 ] ], [ [ 289, 299 ], [ 1040, 1050 ], [ 1276, 1286 ], [ 1370, 1380 ], [ 1392, 1402 ], [ 1479, 1489 ], [ 1496, 1506 ], [ 1526, 1536 ], [ 2541, 2551 ], [ 2673, 2683 ], [ 2695, 2705 ], [ 2807, 2817 ], [ 2824, 2834 ], [ 2866, 2876 ], [ 3832, 3842 ], [ 3987, 3997 ], [ 4160, 4170 ], [ 4352, 4362 ], [ 4976, 4986 ], [ 5034, 5044 ], [ 5118, 5128 ], [ 5135, 5145 ], [ 5190, 5200 ], [ 5631, 5641 ], [ 5680, 5690 ], [ 5735, 5745 ], [ 5778, 5788 ] ], [ [ 315, 323 ] ], [ [ 510, 520 ], [ 663, 673 ], [ 692, 702 ] ] ]
# # Licensed to the Apache Software Foundation (ASF) under one or more # contributor license agreements. See the NOTICE file distributed with # this work for additional information regarding copyright ownership. # The ASF licenses this file to You under the Apache License, Version 2.0 # (the "License"); you may not use this file except in compliance with # the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # import unittest import apache_beam as beam from apache_beam.runners.interactive.user_pipeline_tracker import UserPipelineTracker class UserPipelineTrackerTest(unittest.TestCase): def test_getting_unknown_pid_returns_none(self): ut = UserPipelineTracker() p = beam.Pipeline() self.assertIsNone(ut.get_pipeline(str(id(p)))) def test_getting_unknown_pipeline_returns_none(self): ut = UserPipelineTracker() p = beam.Pipeline() self.assertIsNone(ut.get_user_pipeline(p)) def test_no_parent_returns_none(self): ut = UserPipelineTracker() user = beam.Pipeline() derived = beam.Pipeline() orphan = beam.Pipeline() ut.add_derived_pipeline(user, derived) self.assertIsNone(ut.get_user_pipeline(orphan)) def test_get_user_pipeline_is_same(self): ut = UserPipelineTracker() p = beam.Pipeline() ut.add_user_pipeline(p) self.assertIs(ut.get_user_pipeline(p), p) def test_can_add_derived(self): ut = UserPipelineTracker() user = beam.Pipeline() derived = beam.Pipeline() ut.add_derived_pipeline(user, derived) self.assertIs(ut.get_user_pipeline(derived), user) def test_can_add_multiple_derived(self): """Tests that there can be many user pipelines with many derived pipelines. """ ut = UserPipelineTracker() # Add the first set of user and derived pipelines. user1 = beam.Pipeline() derived11 = beam.Pipeline() derived12 = beam.Pipeline() ut.add_derived_pipeline(user1, derived11) ut.add_derived_pipeline(user1, derived12) # Add the second set of user and derived pipelines. user2 = beam.Pipeline() derived21 = beam.Pipeline() derived22 = beam.Pipeline() ut.add_derived_pipeline(user2, derived21) ut.add_derived_pipeline(user2, derived22) # Assert that the user pipelines are correct. self.assertIs(ut.get_user_pipeline(derived11), user1) self.assertIs(ut.get_user_pipeline(derived12), user1) self.assertIs(ut.get_user_pipeline(derived21), user2) self.assertIs(ut.get_user_pipeline(derived22), user2) def test_cannot_have_multiple_parents(self): ut = UserPipelineTracker() user1 = beam.Pipeline() user2 = beam.Pipeline() derived = beam.Pipeline() ut.add_derived_pipeline(user1, derived) with self.assertRaises(AssertionError): ut.add_derived_pipeline(user2, derived) self.assertIs(ut.get_user_pipeline(derived), user1) def test_adding_derived_with_derived_gets_user_pipeline(self): """Tests that one can correctly add a derived pipeline from a derived pipeline and still get the correct user pipeline. """ ut = UserPipelineTracker() user = beam.Pipeline() derived1 = beam.Pipeline() derived2 = beam.Pipeline() # Add the first derived pipeline to the user pipelne. ut.add_derived_pipeline(user, derived1) # Add the second derived pipeline to the first derived pipeline. This should # get the user pipeline of the first and add the second to it. 
ut.add_derived_pipeline(derived1, derived2) # Asserts that both derived pipelines are under the same user pipeline. self.assertIs(ut.get_user_pipeline(derived1), user) self.assertIs(ut.get_user_pipeline(derived2), user) def test_can_get_pipeline_from_id(self): """Tests the pid -> pipeline memoization.""" ut = UserPipelineTracker() user = beam.Pipeline() derived = beam.Pipeline() ut.add_user_pipeline(user) ut.add_derived_pipeline(user, derived) self.assertIs(ut.get_pipeline(str(id(user))), user) self.assertIs(ut.get_pipeline(str(id(derived))), derived) def test_clear(self): ut = UserPipelineTracker() user = beam.Pipeline() derived = beam.Pipeline() ut.add_derived_pipeline(user, derived) self.assertIs(ut.get_user_pipeline(derived), user) ut.clear() self.assertIsNone(ut.get_user_pipeline(user)) self.assertIsNone(ut.get_user_pipeline(derived)) def test_can_iterate(self): ut = UserPipelineTracker() user1 = beam.Pipeline() derived11 = beam.Pipeline() derived12 = beam.Pipeline() ut.add_derived_pipeline(user1, derived11) ut.add_derived_pipeline(user1, derived12) user2 = beam.Pipeline() derived21 = beam.Pipeline() derived22 = beam.Pipeline() ut.add_derived_pipeline(user2, derived21) ut.add_derived_pipeline(user2, derived22) user_pipelines = set(p for p in ut) self.assertSetEqual(set([user1, user2]), user_pipelines) def test_can_evict_user_pipeline(self): ut = UserPipelineTracker() user1 = beam.Pipeline() derived11 = beam.Pipeline() derived12 = beam.Pipeline() ut.add_derived_pipeline(user1, derived11) ut.add_derived_pipeline(user1, derived12) user2 = beam.Pipeline() derived21 = beam.Pipeline() derived22 = beam.Pipeline() ut.add_derived_pipeline(user2, derived21) ut.add_derived_pipeline(user2, derived22) ut.evict(user1) self.assertIsNone(ut.get_user_pipeline(user1)) self.assertIsNone(ut.get_user_pipeline(derived11)) self.assertIsNone(ut.get_user_pipeline(derived12)) self.assertIs(user2, ut.get_user_pipeline(derived21)) self.assertIs(user2, ut.get_user_pipeline(derived22)) if __name__ == '__main__': unittest.main()
[ [ [ 792, 800 ], [ 947, 955 ], [ 6071, 6079 ] ], [ [ 809, 828 ], [ 1058, 1062 ], [ 1223, 1227 ], [ 1372, 1376 ], [ 1402, 1406 ], [ 1431, 1435 ], [ 1629, 1633 ], [ 1798, 1802 ], [ 1828, 1832 ], [ 2179, 2183 ], [ 2211, 2215 ], [ 2243, 2247 ], [ 2421, 2425 ], [ 2453, 2457 ], [ 2485, 2489 ], [ 2969, 2973 ], [ 2997, 3001 ], [ 3027, 3031 ], [ 3481, 3485 ], [ 3512, 3516 ], [ 3543, 3547 ], [ 4184, 4188 ], [ 4214, 4218 ], [ 4492, 4496 ], [ 4522, 4526 ], [ 4833, 4837 ], [ 4865, 4869 ], [ 4897, 4901 ], [ 5019, 5023 ], [ 5051, 5055 ], [ 5083, 5087 ], [ 5381, 5385 ], [ 5413, 5417 ], [ 5445, 5449 ], [ 5567, 5571 ], [ 5599, 5603 ], [ 5631, 5635 ] ], [ [ 895, 914 ], [ 1027, 1046 ], [ 1192, 1211 ], [ 1338, 1357 ], [ 1598, 1617 ], [ 1764, 1783 ], [ 2089, 2108 ], [ 2934, 2953 ], [ 3447, 3466 ], [ 4150, 4169 ], [ 4458, 4477 ], [ 4798, 4817 ], [ 5346, 5365 ] ], [ [ 923, 946 ] ] ]
# VARIABLES

# ASSIGNING A VALUE TO A VARIABLE
var_teste = 1
print(var_teste)
print(type(var_teste))

# MULTIPLE ASSIGNMENT
pessoa1, pessoa2, pessoa3 = 'Jose', 'Joao', 'Maria'
print(pessoa1)
print(pessoa2)
print(pessoa3)

# CHAINED ASSIGNMENT (all three variables receive the same value)
pessoa1 = pessoa2 = pessoa3 = 'Jose'
print(pessoa1)
print(pessoa2)
print(pessoa3)

# OPERATIONS WITH VARIABLES
idade = 32
idade1 = 28
print('SOMA', idade + idade1)              # sum
print('SUBTRAÇÃO', idade - idade1)         # subtraction
print('MULTIPLICAÇÂO', idade * idade1)     # multiplication
print('DIVISAO', idade / idade1)           # division
print('POTENCIA', idade ** idade1)         # exponentiation
print('DIVISAO INTEIRO', idade // idade1)  # integer division
print('RESTO DIVISÂO', idade % idade1)     # remainder of division
[ [ [ 46, 55 ], [ 66, 75 ], [ 88, 97 ] ], [ [ 123, 130 ], [ 180, 187 ] ], [ [ 132, 139 ], [ 195, 202 ] ], [ [ 141, 148 ], [ 210, 217 ] ], [ [ 246, 253 ], [ 285, 292 ] ], [ [ 254, 261 ], [ 300, 307 ] ], [ [ 262, 269 ], [ 315, 322 ] ], [ [ 349, 354 ], [ 386, 391 ], [ 422, 427 ], [ 460, 465 ], [ 490, 495 ], [ 523, 528 ], [ 564, 569 ], [ 604, 609 ] ], [ [ 360, 366 ], [ 394, 400 ], [ 430, 436 ], [ 468, 474 ], [ 498, 504 ], [ 532, 538 ], [ 573, 579 ], [ 612, 618 ] ] ]
#-*- coding:utf-8 -*- import json import copy import requests import json from flask import render_template, abort, request, url_for, redirect, g import time import datetime from rrd import app from rrd.model.screen import DashboardScreen from rrd.model.graph import DashboardGraph from rrd import consts from rrd.utils.graph_urls import generate_graph_urls from rrd import config @app.route("/screen", methods=["GET", "POST"]) def dash_screens(): top_screens = DashboardScreen.gets(pid='0') top_screens = sorted(top_screens, key=lambda x:x.name) return render_template("screen/index.html", **locals()) @app.route("/screen/<int:sid>/delete") def dash_screen_delete(sid): screen = DashboardScreen.get(sid) if not screen: abort(404, "no such screen") DashboardScreen.remove(sid) return redirect("/screen") @app.route("/screen/<int:sid>/edit", methods=["GET", "POST"]) def dash_screen_edit(sid): screen = DashboardScreen.get(sid) if not screen: abort(404, "no such screen") if request.method == "POST": screen_name = request.form.get("screen_name") screen.update(name=screen_name) return redirect("/screen/%s" %screen.id) else: return render_template("screen/edit.html", **locals()) @app.route("/screen/<int:sid>/clone", methods=["GET", "POST"]) def dash_screen_clone(sid): screen = DashboardScreen.get(sid) if not screen: abort(404, "no such screen") if request.method == "POST": screen_name = request.form.get("screen_name") with_graph = request.form.get("with_graph") new_s = DashboardScreen.add(screen.pid, screen_name) if not new_s: abort(404, "创建screen失败了") if with_graph: old_graphs = DashboardGraph.gets_by_screen_id(sid) for o in old_graphs: DashboardGraph.add(o.title, o.hosts, o.counters, new_s.id, o.timespan, o.graph_type, o.method, o.position) return redirect("/screen/%s" %new_s.id) else: return render_template("screen/clone.html", **locals()) @app.route("/graph/<int:gid>/delete") def dash_graph_delete(gid): graph = DashboardGraph.get(gid) if not graph: abort(404, "no such graph") DashboardGraph.remove(gid) return redirect("/screen/" + graph.screen_id) @app.route("/screen/<int:sid>") def dash_screen(sid): start = request.args.get("start") end = request.args.get("end") top_screens = DashboardScreen.gets(pid=0) top_screens = sorted(top_screens, key=lambda x:x.name) screen = DashboardScreen.get(sid) if not screen: abort(404, "no screen") if str(screen.pid) == '0': sub_screens = DashboardScreen.gets(pid=sid) sub_screens = sorted(sub_screens, key=lambda x:x.name) return render_template("screen/top_screen.html", **locals()) pscreen = DashboardScreen.get(screen.pid) sub_screens = DashboardScreen.gets(pid=screen.pid) sub_screens = sorted(sub_screens, key=lambda x:x.name) graphs = DashboardGraph.gets_by_screen_id(screen.id) all_graphs = [] for graph in graphs: all_graphs.extend(generate_graph_urls(graph, start, end) or []) all_graphs = sorted(all_graphs, key=lambda x:x.position) return render_template("screen/screen.html", **locals()) @app.route("/screen/embed/<int:sid>") def dash_screen_embed(sid): start = request.args.get("start") end = request.args.get("end") screen = DashboardScreen.get(sid) if not screen: abort(404, "no screen") if screen.pid == '0': abort(404, "top screen") graphs = DashboardGraph.gets_by_screen_id(screen.id) all_graphs = [] for graph in graphs: all_graphs.extend(generate_graph_urls(graph, start, end) or []) all_graphs = sorted(all_graphs, key=lambda x:x.position) return render_template("screen/screen_embed.html", **locals()) @app.route("/screen/add", methods=["GET", "POST"]) def 
dash_screen_add(): if request.method == "POST": name = request.form.get("screen_name") pid = request.form.get("pid", '0') screen = DashboardScreen.add(pid, name) return redirect("/screen/%s" % screen.id) else: pid = request.args.get("pid", '0') screen = DashboardScreen.get(pid) return render_template("screen/add.html", **locals()) @app.route("/screen/<int:sid>/graph", methods=["GET", "POST"]) def dash_graph_add(sid): all_screens = DashboardScreen.gets() top_screens = [x for x in all_screens if x.pid == '0'] children = [] for t in top_screens: children.append([x for x in all_screens if x.pid == t.id]) screen = DashboardScreen.get(sid) if not screen: abort(404, "no screen") pscreen = DashboardScreen.get(screen.pid) if request.method == "POST": title = request.form.get("title") hosts = request.form.get("hosts", "").strip() hosts = hosts and hosts.split("\n") or [] hosts = [x.strip() for x in hosts] counters = request.form.get("counters", "").strip() counters = counters and counters.split("\n") or [] counters = [x.strip() for x in counters] timespan = request.form.get("timespan", 3600) graph_type = request.form.get("graph_type", 'h') method = request.form.get("method", '').upper() position = request.form.get("position", 0) graph = DashboardGraph.add(title, hosts, counters, sid, timespan, graph_type, method, position) return redirect("/screen/%s" % sid) else: gid = request.args.get("gid") graph = gid and DashboardGraph.get(gid) return render_template("screen/graph_add.html", config=config, **locals()) @app.route("/graph/<int:gid>/edit", methods=["GET", "POST"]) def dash_graph_edit(gid): error = "" graph = DashboardGraph.get(gid) if not graph: abort(404, "no graph") all_screens = DashboardScreen.gets() top_screens = [x for x in all_screens if x.pid == '0'] children = [] for t in top_screens: children.append([x for x in all_screens if x.pid == t.id]) screen = DashboardScreen.get(graph.screen_id) if not screen: abort(404, "no screen") pscreen = DashboardScreen.get(screen.pid) if request.method == "POST": ajax = request.form.get("ajax", "") screen_id = request.form.get("screen_id") title = request.form.get("title", "").strip() hosts = request.form.get("hosts", "").strip() hosts = hosts and hosts.split("\n") or [] hosts = [x.strip() for x in hosts] counters = request.form.get("counters", "").strip() counters = counters and counters.split("\n") or [] counters = [x.strip() for x in counters] timespan = request.form.get("timespan", 3600) graph_type = request.form.get("graph_type", 'h') method = request.form.get("method", '').upper() position = request.form.get("position", 0) graph = graph.update(title, hosts, counters, screen_id, timespan, graph_type, method, position) error = u"修改成功了" if not ajax: return render_template("screen/graph_edit.html", config=config, **locals()) else: return "ok" else: ajax = request.args.get("ajax", "") return render_template("screen/graph_edit.html", **locals()) @app.route("/graph/multi_edit", methods=["GET", "POST"]) def dash_graph_multi_edit(): ret = { "ok": False, "msg": "", "data": [], } if request.method == "POST": d = request.data try: jdata = json.loads(d) except ValueError: jdata = None if not jdata: return json.dumps({ "ok": False, "msg": "no_data_post", }) rows = [] for x in jdata: rows.append({"id": x["id"], "hosts": x["endpoints"], "counters": x["counters"]}) DashboardGraph.update_multi(rows) return json.dumps({ "ok": True, "msg": "", }) elif request.method == "GET": sid = request.args.get("sid") if not sid or not DashboardScreen.get(sid): ret["msg"] = 
"no_screen" return json.dumps(ret) ret["ok"] = True graphs = DashboardGraph.gets_by_screen_id(sid) ret['data'] = [{"id": x.id, "title": x.title, "endpoints":x.hosts, "counters":x.counters} for x in graphs] return json.dumps(ret)
[ [ [ 29, 33 ] ], [ [ 41, 45 ] ], [ [ 53, 61 ] ], [ [ 69, 73 ], [ 7750, 7754 ], [ 7858, 7862 ], [ 8156, 8160 ], [ 8418, 8422 ], [ 8653, 8657 ] ], [ [ 92, 107 ], [ 570, 585 ], [ 1232, 1247 ], [ 2069, 2084 ], [ 2841, 2856 ], [ 3306, 3321 ], [ 3895, 3910 ], [ 4358, 4373 ], [ 5730, 5745 ], [ 7251, 7266 ], [ 7428, 7443 ] ], [ [ 109, 114 ], [ 753, 758 ], [ 1001, 1006 ], [ 1437, 1442 ], [ 1702, 1707 ], [ 2247, 2252 ], [ 2655, 2660 ], [ 3561, 3566 ], [ 3620, 3625 ], [ 4771, 4776 ], [ 5963, 5968 ], [ 6276, 6281 ] ], [ [ 116, 123 ], [ 1038, 1045 ], [ 1086, 1093 ], [ 1474, 1481 ], [ 1522, 1529 ], [ 1575, 1582 ], [ 2423, 2430 ], [ 2459, 2466 ], [ 3435, 3442 ], [ 3471, 3478 ], [ 4034, 4041 ], [ 4075, 4082 ], [ 4121, 4128 ], [ 4272, 4279 ], [ 4849, 4856 ], [ 4891, 4898 ], [ 4934, 4941 ], [ 5085, 5092 ], [ 5254, 5261 ], [ 5310, 5317 ], [ 5363, 5370 ], [ 5421, 5428 ], [ 5643, 5650 ], [ 6354, 6361 ], [ 6395, 6402 ], [ 6444, 6451 ], [ 6490, 6497 ], [ 6545, 6552 ], [ 6696, 6703 ], [ 6865, 6872 ], [ 6921, 6928 ], [ 6974, 6981 ], [ 7032, 7039 ], [ 7384, 7391 ], [ 7666, 7673 ], [ 7704, 7711 ], [ 8247, 8254 ], [ 8286, 8293 ] ], [ [ 125, 132 ] ], [ [ 134, 142 ], [ 826, 834 ], [ 1173, 1181 ], [ 2011, 2019 ], [ 2317, 2325 ], [ 4213, 4221 ], [ 5589, 5597 ] ], [ [ 144, 145 ] ], [ [ 153, 157 ] ], [ [ 165, 173 ] ], [ [ 191, 194 ], [ 385, 388 ], [ 621, 624 ], [ 848, 851 ], [ 1282, 1285 ], [ 2120, 2123 ], [ 2358, 2361 ], [ 3358, 3361 ], [ 3954, 3957 ], [ 4407, 4410 ], [ 5800, 5803 ], [ 7484, 7487 ] ], [ [ 224, 239 ], [ 469, 484 ], [ 701, 716 ], [ 786, 801 ], [ 949, 964 ], [ 1385, 1400 ], [ 1623, 1638 ], [ 2502, 2517 ], [ 2603, 2618 ], [ 2733, 2748 ], [ 2910, 2925 ], [ 2960, 2975 ], [ 3509, 3524 ], [ 4167, 4182 ], [ 4318, 4333 ], [ 4512, 4527 ], [ 4719, 4734 ], [ 4809, 4824 ], [ 6005, 6020 ], [ 6212, 6227 ], [ 6314, 6329 ], [ 8336, 8351 ] ], [ [ 268, 282 ], [ 1777, 1791 ], [ 1864, 1878 ], [ 2197, 2211 ], [ 2279, 2293 ], [ 3069, 3083 ], [ 3659, 3673 ], [ 5470, 5484 ], [ 5691, 5705 ], [ 5913, 5927 ], [ 8105, 8119 ], [ 8485, 8499 ] ], [ [ 299, 305 ] ], [ [ 339, 358 ], [ 3186, 3205 ], [ 3775, 3794 ] ], [ [ 376, 382 ], [ 5778, 5784 ], [ 7300, 7306 ] ], [ [ 435, 447 ] ], [ [ 663, 681 ] ], [ [ 913, 929 ] ], [ [ 1348, 1365 ] ], [ [ 2161, 2178 ] ], [ [ 2393, 2404 ] ], [ [ 3399, 3416 ] ], [ [ 4008, 4023 ] ], [ [ 4473, 4487 ] ], [ [ 5864, 5879 ] ], [ [ 7544, 7565 ] ] ]
import torch
import torch.nn.functional as F
from torch.nn import Linear
from torch_geometric.nn import (ASAPooling, GraphConv, global_mean_pool,
                                JumpingKnowledge)


class ASAP(torch.nn.Module):
    def __init__(self, num_vocab, max_seq_len, node_encoder, emb_dim,
                 num_layers, hidden, ratio=0.8, dropout=0, num_class=0):
        super(ASAP, self).__init__()
        self.num_class = num_class
        self.max_seq_len = max_seq_len
        self.node_encoder = node_encoder

        self.conv1 = GraphConv(emb_dim, hidden, aggr='mean')
        self.convs = torch.nn.ModuleList()
        self.pools = torch.nn.ModuleList()
        self.convs.extend([
            GraphConv(hidden, hidden, aggr='mean')
            for i in range(num_layers - 1)
        ])
        self.pools.extend([
            ASAPooling(hidden, ratio, dropout=dropout)
            for i in range((num_layers) // 2)
        ])
        self.jump = JumpingKnowledge(mode='cat')
        self.lin1 = Linear(num_layers * hidden, hidden)
        # self.lin2 = Linear(hidden, dataset.num_classes)

        if self.num_class > 0:  # classification
            self.graph_pred_linear = torch.nn.Linear(hidden, self.num_class)
        else:
            self.graph_pred_linear_list = torch.nn.ModuleList()
            for i in range(max_seq_len):
                self.graph_pred_linear_list.append(torch.nn.Linear(hidden, num_vocab))

    def reset_parameters(self):
        self.conv1.reset_parameters()
        for conv in self.convs:
            conv.reset_parameters()
        for pool in self.pools:
            pool.reset_parameters()
        self.lin1.reset_parameters()
        # self.lin2 is commented out in __init__, so resetting it here would raise
        # AttributeError; keep the call disabled to match.
        # self.lin2.reset_parameters()

    def forward(self, data):
        x, edge_index, node_depth, batch = (data.x, data.edge_index,
                                            data.node_depth, data.batch)
        x = self.node_encoder(x, node_depth.view(-1, ))
        edge_weight = None

        x = F.relu(self.conv1(x, edge_index))
        xs = [global_mean_pool(x, batch)]
        for i, conv in enumerate(self.convs):
            x = conv(x=x, edge_index=edge_index, edge_weight=edge_weight)
            x = F.relu(x)
            xs += [global_mean_pool(x, batch)]
            if i % 2 == 0 and i < len(self.convs) - 1:
                pool = self.pools[i // 2]
                x, edge_index, edge_weight, batch, _ = pool(
                    x=x, edge_index=edge_index, edge_weight=edge_weight,
                    batch=batch)
        x = self.jump(xs)
        x = F.relu(self.lin1(x))
        x = F.dropout(x, p=0.5, training=self.training)
        # x = self.lin2(x)
        # return F.log_softmax(x, dim=-1)

        if self.num_class > 0:
            return self.graph_pred_linear(x)

        pred_list = []
        for i in range(self.max_seq_len):
            pred_list.append(self.graph_pred_linear_list[i](x))

        return pred_list

    def __repr__(self):
        return self.__class__.__name__
[ [ [ 7, 12 ], [ 241, 246 ], [ 621, 626 ], [ 664, 669 ], [ 1209, 1214 ], [ 1305, 1310 ], [ 1419, 1424 ] ], [ [ 20, 44 ], [ 1959, 1960 ], [ 2171, 2172 ], [ 2530, 2531 ], [ 2563, 2564 ] ], [ [ 66, 72 ], [ 1028, 1034 ] ], [ [ 105, 115 ], [ 859, 869 ] ], [ [ 149, 158 ], [ 560, 569 ], [ 726, 735 ] ], [ [ 160, 176 ], [ 2007, 2023 ], [ 2200, 2216 ] ], [ [ 210, 226 ], [ 979, 995 ] ], [ [ 236, 240 ], [ 399, 403 ] ] ]
#!/usr/bin/env python
import rospy
from duckietown_msgs.msg import WheelsCmdStamped, FSMState


class WheelsCmdSwitchNode(object):
    def __init__(self):
        self.node_name = rospy.get_name()
        rospy.loginfo("[%s] Initializing " % (self.node_name))

        # Read parameters
        self.mappings = rospy.get_param("~mappings")
        source_topic_dict = rospy.get_param("~source_topics")
        self.current_src_name = None

        # Construct publisher
        self.pub_cmd = rospy.Publisher("~wheels_cmd", WheelsCmdStamped, queue_size=1)

        # Construct subscribers
        self.sub_fsm_state = rospy.Subscriber(rospy.get_param("~mode_topic"), FSMState, self.cbFSMState)
        self.sub_dict = dict()
        for src_name, topic_name in source_topic_dict.items():
            self.sub_dict[src_name] = rospy.Subscriber(topic_name, WheelsCmdStamped, self.cbWheelsCmd, callback_args=src_name)

    def cbFSMState(self, fsm_state_msg):
        self.current_src_name = self.mappings.get(fsm_state_msg.state)
        if self.current_src_name is None:
            rospy.logwarn("[%s] FSMState %s not handled. No msg pass through the switch." % (self.node_name, fsm_state_msg.state))

    def cbWheelsCmd(self, msg, src_name):
        if src_name == self.current_src_name:
            self.pub_cmd.publish(msg)

    def on_shutdown(self):
        rospy.loginfo("[%s] Shutting down." % (self.node_name))


if __name__ == '__main__':
    # Initialize the node with rospy
    rospy.init_node('wheels_cmd_switch_node', anonymous=False)
    # Create the DaguCar object
    node = WheelsCmdSwitchNode()
    # Setup proper shutdown behavior
    rospy.on_shutdown(node.on_shutdown)
    # Keep it spinning to keep the node alive
    rospy.spin()
[ [ [ 29, 34 ], [ 1480, 1485 ], [ 1646, 1651 ], [ 1732, 1737 ], [ 178, 183 ], [ 203, 208 ], [ 306, 311 ], [ 363, 368 ], [ 488, 493 ], [ 619, 624 ], [ 636, 641 ], [ 826, 831 ], [ 1078, 1083 ], [ 1356, 1361 ] ], [ [ 67, 83 ], [ 518, 534 ], [ 854, 870 ] ], [ [ 85, 93 ], [ 667, 675 ] ], [ [ 100, 119 ], [ 1582, 1601 ] ], [ [ 1575, 1579 ], [ 1664, 1668 ] ] ]
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import json

from alipay.aop.api.constant.ParamConstants import *
from alipay.aop.api.domain.SettleEntity import SettleEntity


class AlipayTradeSettleReceivablesQueryModel(object):

    def __init__(self):
        self._biz_product = None
        self._extend_params = None
        self._merchant_info = None
        self._out_request_no = None

    @property
    def biz_product(self):
        return self._biz_product

    @biz_product.setter
    def biz_product(self, value):
        self._biz_product = value

    @property
    def extend_params(self):
        return self._extend_params

    @extend_params.setter
    def extend_params(self, value):
        self._extend_params = value

    @property
    def merchant_info(self):
        return self._merchant_info

    @merchant_info.setter
    def merchant_info(self, value):
        if isinstance(value, SettleEntity):
            self._merchant_info = value
        else:
            self._merchant_info = SettleEntity.from_alipay_dict(value)

    @property
    def out_request_no(self):
        return self._out_request_no

    @out_request_no.setter
    def out_request_no(self, value):
        self._out_request_no = value

    def to_alipay_dict(self):
        params = dict()
        if self.biz_product:
            if hasattr(self.biz_product, 'to_alipay_dict'):
                params['biz_product'] = self.biz_product.to_alipay_dict()
            else:
                params['biz_product'] = self.biz_product
        if self.extend_params:
            if hasattr(self.extend_params, 'to_alipay_dict'):
                params['extend_params'] = self.extend_params.to_alipay_dict()
            else:
                params['extend_params'] = self.extend_params
        if self.merchant_info:
            if hasattr(self.merchant_info, 'to_alipay_dict'):
                params['merchant_info'] = self.merchant_info.to_alipay_dict()
            else:
                params['merchant_info'] = self.merchant_info
        if self.out_request_no:
            if hasattr(self.out_request_no, 'to_alipay_dict'):
                params['out_request_no'] = self.out_request_no.to_alipay_dict()
            else:
                params['out_request_no'] = self.out_request_no
        return params

    @staticmethod
    def from_alipay_dict(d):
        if not d:
            return None
        o = AlipayTradeSettleReceivablesQueryModel()
        if 'biz_product' in d:
            o.biz_product = d['biz_product']
        if 'extend_params' in d:
            o.extend_params = d['extend_params']
        if 'merchant_info' in d:
            o.merchant_info = d['merchant_info']
        if 'out_request_no' in d:
            o.out_request_no = d['out_request_no']
        return o
[ [ [ 53, 57 ] ], [ [ 110, 111 ] ], [ [ 159, 171 ], [ 907, 919 ], [ 1010, 1022 ] ], [ [ 180, 218 ], [ 2403, 2441 ] ] ]
import os
from deta import Deta
from datetime import date, datetime
from fastapi import HTTPException
import urllib
import base64

deta = Deta()
base = deta.Base("drawings")
drive = deta.Drive("drawings")


def get_all(db, query):
    blob_gen = db.fetch(query)
    blobs = []
    for stored_blob in blob_gen:
        for blob in stored_blob:
            blobs.append(blob)
    return blobs


# list all drawings
def get_drawings():
    try:
        return get_all(base, {})
    except:
        return None


# save existing drawing
def save(name, file):
    encoded_name = str(base64.urlsafe_b64encode(name.encode("utf-8")), 'utf-8')
    b = base.get(encoded_name)
    try:
        if (b):
            base.put({"key": encoded_name, "name": name, "public": b["public"],
                      "lastModified": datetime.utcnow().timestamp()})
            return drive.put(name, file)
        base.put({"key": encoded_name, "name": name, "public": False,
                  "lastModified": datetime.utcnow().timestamp()})
        return drive.put(name, file)
    except:
        return None


# save
def save_as(name, file, overwrite):
    encoded_name = str(base64.urlsafe_b64encode(name.encode("utf-8")), 'utf-8')
    b = base.get(encoded_name)
    record = {"key": encoded_name, "name": name, "public": False,
              'lastModified': datetime.utcnow().timestamp()}
    if (overwrite or not b):
        # Overwrite allowed or Record Does not Exist
        base.put(record)
        drive.put(name, file)
        return record
    else:
        # Overwrite False and Record Exists
        return None


def get_drawing(name):
    encoded_name = str(base64.urlsafe_b64encode(name.encode("utf-8")), 'utf-8')
    b = base.get(encoded_name)
    d = drive.get(name)
    if (b and d):
        return d.read()
    base.delete(encoded_name)
    drive.delete(name)
    return None


def get_metadata(name):
    encoded_name = str(base64.urlsafe_b64encode(name.encode("utf-8")), 'utf-8')
    b = base.get(encoded_name)
    if (b):
        return b
    return None


def delete_drawing(name):
    encoded_name = str(base64.urlsafe_b64encode(name.encode("utf-8")), 'utf-8')
    try:
        base.delete(encoded_name)
        drive.delete(name)
        return name
    except:
        return None


def modify_public(name, public):
    encoded_name = str(base64.urlsafe_b64encode(name.encode("utf-8")), 'utf-8')
    try:
        b = base.get(encoded_name)
        if (b):
            b["public"] = public
            return base.put(b)
        return None
    except:
        return None


def get_public_drawing(name):
    encoded_name = str(base64.urlsafe_b64encode(name.encode("utf-8")), 'utf-8')
    try:
        b = base.get(encoded_name)
        if (b and b["public"]):
            return drive.get(name)
        return None
    except:
        return None
[ [ [ 7, 9 ] ], [ [ 27, 31 ], [ 138, 142 ] ], [ [ 53, 57 ] ], [ [ 59, 67 ], [ 787, 795 ], [ 945, 953 ], [ 1282, 1290 ] ], [ [ 88, 101 ] ], [ [ 109, 115 ] ], [ [ 123, 129 ], [ 578, 584 ], [ 1113, 1119 ], [ 1578, 1584 ], [ 1849, 1855 ], [ 2032, 2038 ], [ 2272, 2278 ], [ 2563, 2569 ] ], [ [ 131, 135 ], [ 153, 157 ], [ 184, 188 ] ], [ [ 146, 150 ], [ 466, 470 ], [ 643, 647 ], [ 703, 707 ], [ 868, 872 ], [ 1178, 1182 ], [ 1395, 1399 ], [ 1643, 1647 ], [ 1736, 1740 ], [ 1914, 1918 ], [ 2106, 2110 ], [ 2350, 2354 ], [ 2441, 2445 ], [ 2641, 2645 ] ], [ [ 176, 181 ], [ 838, 843 ], [ 992, 997 ], [ 1420, 1425 ], [ 1674, 1679 ], [ 1766, 1771 ], [ 2140, 2145 ], [ 2715, 2720 ] ], [ [ 213, 220 ], [ 458, 465 ] ], [ [ 418, 430 ] ], [ [ 537, 541 ] ], [ [ 1058, 1065 ] ], [ [ 1536, 1547 ] ], [ [ 1806, 1818 ] ], [ [ 1987, 2001 ] ], [ [ 2220, 2233 ] ], [ [ 2514, 2532 ] ] ]
# This code is part of Qiskit. # # (C) Copyright IBM 2018, 2021. # # This code is licensed under the Apache License, Version 2.0. You may # obtain a copy of this license in the LICENSE.txt file in the root directory # of this source tree or at http://www.apache.org/licenses/LICENSE-2.0. # # Any modifications or derivative works of this code must retain this # copyright notice, and modified files need to carry a notice indicating # that they have been altered from the originals. """ Simulator instruction to save statevector amplitudes and amplitudes squared. """ from qiskit.circuit import QuantumCircuit from qiskit.extensions.exceptions import ExtensionError from .save_data import SaveSingleData, SaveAverageData, default_qubits class SaveAmplitudes(SaveSingleData): """Save complex statevector amplitudes.""" def __init__(self, key, num_qubits, params, pershot=False, conditional=False): """Instruction to save complex statevector amplitudes. Args: key (str): the key for retrieving saved data from results. num_qubits (int): the number of qubits for the snapshot type. params (list): list of entries to vale. pershot (bool): if True save a list of amplitudes vectors for each shot of the simulation rather than the a single amplitude vector [Default: False]. conditional (bool): if True save the amplitudes vector conditional on the current classical register values [Default: False]. Raises: ExtensionError: if params is invalid for the specified number of qubits. """ params = _format_amplitude_params(params, num_qubits) super().__init__("save_amplitudes", key, num_qubits, pershot=pershot, conditional=conditional, params=params) class SaveAmplitudesSquared(SaveAverageData): """Save squared statevector amplitudes (probabilities).""" def __init__(self, key, num_qubits, params, unnormalized=False, pershot=False, conditional=False): """Instruction to save squared statevector amplitudes (probabilities). Args: key (str): the key for retrieving saved data from results. num_qubits (int): the number of qubits for the snapshot type. params (list): list of entries to vale. unnormalized (bool): If True return save the unnormalized accumulated probabilities over all shots [Default: False]. pershot (bool): if True save a list of probability vectors for each shot of the simulation rather than the a single amplitude vector [Default: False]. conditional (bool): if True save the probability vector conditional on the current classical register values [Default: False]. Raises: ExtensionError: if params is invalid for the specified number of qubits. """ params = _format_amplitude_params(params, num_qubits) super().__init__("save_amplitudes_sq", key, num_qubits, unnormalized=unnormalized, pershot=pershot, conditional=conditional, params=params) def save_amplitudes(self, key, params, pershot=False, conditional=False): """Save complex statevector amplitudes. Args: key (str): the key for retrieving saved data from results. params (List[int] or List[str]): the basis states to return amplitudes for. pershot (bool): if True save a list of amplitudes vectors for each shot of the simulation rather than the a single amplitude vector [Default: False]. conditional (bool): if True save the amplitudes vector conditional on the current classical register values [Default: False]. Returns: QuantumCircuit: with attached instruction. Raises: ExtensionError: if params is invalid for the specified number of qubits. 
""" qubits = default_qubits(self) instr = SaveAmplitudes(key, len(qubits), params, pershot=pershot, conditional=conditional) return self.append(instr, qubits) def save_amplitudes_squared(self, key, params, unnormalized=False, pershot=False, conditional=False): """Save squared statevector amplitudes (probabilities). Args: key (str): the key for retrieving saved data from results. params (List[int] or List[str]): the basis states to return amplitudes for. unnormalized (bool): If True return save the unnormalized accumulated probabilities over all shots [Default: False]. pershot (bool): if True save a list of probability vectors for each shot of the simulation rather than the a single amplitude vector [Default: False]. conditional (bool): if True save the probability vector conditional on the current classical register values [Default: False]. Returns: QuantumCircuit: with attached instruction. Raises: ExtensionError: if params is invalid for the specified number of qubits. """ qubits = default_qubits(self) instr = SaveAmplitudesSquared(key, len(qubits), params, unnormalized=unnormalized, pershot=pershot, conditional=conditional) return self.append(instr, qubits) def _format_amplitude_params(params, num_qubits=None): """Format amplitude params as a interger list.""" if isinstance(params[0], str): if params[0].find('0x') == 0: params = [int(i, 16) for i in params] else: params = [int(i, 2) for i in params] if num_qubits and max(params) >= 2 ** num_qubits: raise ExtensionError( "Param values contain a state larger than the number of qubits") return params QuantumCircuit.save_amplitudes = save_amplitudes QuantumCircuit.save_amplitudes_squared = save_amplitudes_squared
[ [ [ 596, 610 ], [ 6721, 6735 ], [ 6770, 6784 ] ], [ [ 652, 666 ], [ 6608, 6622 ] ], [ [ 690, 704 ], [ 761, 775 ] ], [ [ 706, 721 ], [ 2137, 2152 ] ], [ [ 723, 737 ], [ 4630, 4644 ], [ 5953, 5967 ] ], [ [ 746, 760 ], [ 4663, 4677 ] ], [ [ 2115, 2136 ], [ 5986, 6007 ] ], [ [ 3778, 3793 ], [ 6754, 6769 ] ], [ [ 4817, 4840 ], [ 6811, 6834 ] ], [ [ 6249, 6273 ], [ 1819, 1843 ], [ 3429, 3453 ] ] ]
#!/usr/bin/env python import sys import re import optparse from ctypes import * """ This script will use the prototypes from "checkdocs.py -s" to concoct a 1:1 Python wrapper for Allegro. """ class _AL_UTF8String: pass class Allegro: def __init__(self): self.types = {} self.functions = {} self.constants = {} def add_struct(self, name): x = type(name, (Structure, ), {}) self.types[name] = x def add_union(self, name): x = type(name, (Union, ), {}) self.types[name] = x def get_type(self, ptype): conversion = { "bool": c_bool, "_Bool": c_bool, "char": c_byte, "unsignedchar": c_ubyte, "int": c_int, "unsigned": c_uint, "unsignedint": c_uint, "int16_t": c_int16, "uint16_t": c_uint16, "int32_t": c_int32, "uint32_t": c_uint32, "int64_t": c_int64, "uint64_t": c_uint64, "uintptr_t": c_void_p, "intptr_t": c_void_p, "GLuint": c_uint, "unsignedlong": c_ulong, "long": c_long, "size_t": c_size_t, "off_t": c_int64, "time_t": c_int64, "va_list": c_void_p, "float": c_float, "double": c_double, "al_fixed": c_int, "HWND": c_void_p, "char*": _AL_UTF8String, # hack: this probably shouldn't be in the public docs "postprocess_callback_t": c_void_p, } ptype = re.sub(r"\bstruct|union\b", "", ptype) ptype = re.sub(r"\bconst\b", "", ptype) ptype = re.sub(r"\bextern\b", "", ptype) ptype = re.sub(r"\b__inline__\b", "", ptype) ptype = re.sub(r"\s+", "", ptype) if ptype.endswith("*"): if ptype in conversion: return conversion[ptype] t = ptype[:-1] if t in self.types: return POINTER(self.types[t]) return c_void_p elif ptype in self.types: return self.types[ptype] else: try: return conversion[ptype] except KeyError: print("Type Error:" + str(ptype)) return None def parse_funcs(self, funcs): """ Go through all documented functions and add their prototypes as Python functions. The file should have been generated by Allegro's documentation generation scripts. 
""" for func in funcs: name, proto = func.split(":", 1) if not name.startswith("al_"): continue proto = proto.strip() name = name[:-2] if proto.startswith("enum"): continue if proto.startswith("typedef"): continue if "=" in proto: continue if proto.startswith("#"): continue funcstart = proto.find(name) funcend = funcstart + len(name) ret = proto[:funcstart].rstrip() params = proto[funcend:].strip(" ;") if params[0] != "(" or params[-1] != ")": print("Error:") print(params) continue params2 = params[1:-1] # remove callback argument lists balance = 0 params = "" for c in params2: if c == ")": balance -= 1 if balance == 0: params += c if c == "(": balance += 1 params = params.split(",") plist = [] for param in params: param = re.sub(r"\bconst\b", "", param) param = param.strip() if param == "void": continue if param == "": continue if param == "...": continue # treat arrays as a void pointer, for now if param.endswith("]") or param.endswith("*"): plist.append(c_void_p) continue # treat callbacks as a void pointer, for now if param.endswith(")"): plist.append(c_void_p) continue mob = re.match("^.*?(\w+)$", param) if mob: pnamepos = mob.start(1) if pnamepos == 0: # Seems the parameter is not named pnamepos = len(param) else: print(params) print(proto) print("") continue ptype = param[:pnamepos] ptype = self.get_type(ptype) plist.append(ptype) f = type("", (object, ), {"restype": c_int}) if not ret.endswith("void"): f.restype = self.get_type(ret) try: f.argtypes = plist except TypeError as e: print(e) print(name) print(plist) self.functions[name] = f def parse_protos(self, filename): protos = [] unions = [] funcs = [] # first pass: create all structs, but without fields for line in open(filename): name, proto = line.split(":", 1) proto = proto.lstrip() if name.endswith("()"): funcs.append(line) continue # anonymous structs have no name at all if name and not name.startswith("ALLEGRO_"): continue if name == "ALLEGRO_OGL_EXT_API": continue if proto.startswith("union") or\ proto.startswith("typedef union"): self.add_union(name) unions.append((name, proto)) elif proto.startswith("struct") or\ proto.startswith("typedef struct"): self.add_struct(name) protos.append((name, proto)) elif proto.startswith("enum") or\ proto.startswith("typedef enum"): if name: self.types[name] = c_int protos.append(("", proto)) elif proto.startswith("#define"): if not name.startswith("_") and not name.startswith("GL_"): i = eval(proto.split(None, 2)[2]) self.constants[name] = i else: # actual typedef mob = re.match("typedef (.*) " + name, proto) if mob: t = mob.group(1) self.types[name] = self.get_type(t.strip()) else: # Probably a function pointer self.types[name] = c_void_p protos += unions # second pass: fill in fields for name, proto in protos: bo = proto.find("{") if bo == -1: continue bc = proto.rfind("}") braces = proto[bo + 1:bc] if proto.startswith("enum") or \ proto.startswith("typedef enum"): fields = braces.split(",") i = 0 for field in fields: if "=" in field: fname, val = field.split("=", 1) fname = fname.strip() try: i = int(eval(val, globals(), self.constants)) except NameError: i = val else: fname = field.strip() if not fname: continue self.constants[fname] = i try: i += 1 except TypeError: pass continue balance = 0 fields = [""] for c in braces: if c == "{": balance += 1 if c == "}": balance -= 1 if c == ";" and balance == 0: fields.append("") else: fields[-1] += c flist = [] for field in fields: if not field: continue # add function pointer as void pointer mob = re.match(".*?\(\*(\w+)\)", field) if mob: 
flist.append((mob.group(1), "c_void_p")) continue # add any pointer as void pointer mob = re.match(".*?\*(\w+)$", field) if mob: flist.append((mob.group(1), "c_void_p")) continue # add an array mob = re.match("(.*)( \w+)\[(.*?)\]$", field) if mob: # this is all a hack n = 0 ftype = mob.group(1) if ftype.startswith("struct"): if ftype == "struct {float axis[3];}": t = "c_float * 3" else: print("Error: Can't parse " + ftype + " yet.") t = None else: n = mob.group(3) # something in A5 uses a 2d array if "][" in n: n = n.replace("][", " * ") # something uses a division expression if "/" in n: n = "(" + n.replace("/", "//") + ")" t = self.get_type(ftype).__name__ + " * " + n fname = mob.group(2) flist.append((fname, t)) continue vars = field.split(",") mob = re.match("\s*(.*?)\s+(\w+)\s*$", vars[0]) t = self.get_type(mob.group(1)) vname = mob.group(2) if t is not None and vname is not None: flist.append((vname, t.__name__)) for v in vars[1:]: flist.append((v.strip(), t.__name__)) else: print("Error: " + str(vars)) try: self.types[name].my_fields = flist except AttributeError: print(name, flist) self.parse_funcs(funcs) def main(): p = optparse.OptionParser() p.add_option("-o", "--output", help="location of generated file") p.add_option("-p", "--protos", help="A file with all " + "prototypes to generate Python wrappers for, one per line. " "Generate it with docs/scripts/checkdocs.py -p") p.add_option("-t", "--type", help="the library type to " + "use, e.g. debug") p.add_option("-v", "--version", help="the library version to " + "use, e.g. 5.1") options, args = p.parse_args() if not options.protos: p.print_help() return al = Allegro() al.parse_protos(options.protos) f = open(options.output, "w") if options.output else sys.stdout release = options.type version = options.version f.write(r"""# Generated by generate_python_ctypes.py. import os, platform, sys from ctypes import * from ctypes.util import * # You must adjust this function to point ctypes to the A5 DLLs you are # distributing. _dlls = [] def _add_dll(name): release = "%(release)s" if os.name == "nt": release = "%(release)s-%(version)s" # Under Windows, DLLs are found in the current directory, so this # would be an easy way to keep all your DLLs in a sub-folder. # os.chdir("dlls") path = find_library(name + release) if not path: if os.name == "mac": path = name + release + ".dylib" elif os.name == "nt": path = name + release + ".dll" elif os.name == "posix": if platform.mac_ver()[0]: path = name + release + ".dylib" else: path = "lib" + name + release + ".so" else: sys.stderr.write("Cannot find library " + name + "\n") # In most cases, you actually don't want the above and instead # use the exact filename within your game distribution, possibly # even within a .zip file. # if not os.path.exists(path): # path = "dlls/" + path try: # RTLD_GLOBAL is required under OSX for some reason (?) _dlls.append(CDLL(path, RTLD_GLOBAL)) except OSError: # No need to fail here, might just be one of the addons. pass # os.chdir("..") _add_dll("allegro") _add_dll("allegro_acodec") _add_dll("allegro_audio") _add_dll("allegro_primitives") _add_dll("allegro_color") _add_dll("allegro_font") _add_dll("allegro_ttf") _add_dll("allegro_image") _add_dll("allegro_dialog") _add_dll("allegro_memfile") _add_dll("allegro_physfs") _add_dll("allegro_shader") _add_dll("allegro_main") _add_dll("allegro_monolith") # We don't have information ready which A5 function is in which DLL, # so we just try them all. 
def _dll(func, ret, params): for dll in _dlls: try: f = dll[func] f.restype = ret f.argtypes = params return f except AttributeError: pass sys.stderr.write("Cannot find function " + func + "\n") return lambda *args: None # In Python3, all Python strings are unicode so we have to convert to # UTF8 byte strings before passing to Allegro. if sys.version_info[0] > 2: class _AL_UTF8String: def from_param(x): return x.encode("utf8") else: _AL_UTF8String = c_char_p """ % locals()) postpone = [] for name, val in sorted(al.constants.items()): try: if isinstance(val, str): val = int(eval(val, globals(), al.constants)) f.write(name + " = " + str(val) + "\n") except: postpone.append((name, val)) for name, val in postpone: f.write(name + " = " + val + "\n") structs = set() # output everything except structs and unions for name, x in sorted(al.types.items()): if not name: continue base = x.__bases__[0] if base != Structure and base != Union: f.write(name + " = " + x.__name__ + "\n") else: structs.add(name) # order structs and unions by their dependencies structs_list = [] remaining = set(structs) while remaining: for name in sorted(remaining): ok = True x = al.types[name] if hasattr(x, "my_fields"): for fname, ftype in x.my_fields: if " " in ftype: ftype = ftype.split()[0] if ftype in structs and ftype in remaining: ok = False break if ok: structs_list.append(name) remaining.remove(name) for name in structs_list: x = al.types[name] base = x.__bases__[0] f.write("class " + name + "(" + base.__name__ + "):\n") if hasattr(x, "my_fields"): f.write(" _fields_ = [\n") for fname, ftype in x.my_fields: f.write(" (\"" + fname + "\", " + ftype + "),\n") f.write(" ]\n") else: f.write(" pass\n") pt = POINTER(x) f.write("%s = POINTER(%s)\n" % (pt.__name__, name)) for name, x in sorted(al.functions.items()): try: line = name + " = _dll(\"" + name + "\", " line += x.restype.__name__ + ", " line += "[" + (", ".join([a.__name__ for a in x.argtypes])) +\ "])\n" f.write(line) except AttributeError as e: print("Ignoring " + name + " because of errors (" + str(e) + ").") # some stuff the automated parser doesn't pick up f.write(r""" ALLEGRO_VERSION_INT = \ ((ALLEGRO_VERSION << 24) | (ALLEGRO_SUB_VERSION << 16) | \ (ALLEGRO_WIP_VERSION << 8) | ALLEGRO_RELEASE_NUMBER) """) f.write(r""" # work around bug http://gcc.gnu.org/bugzilla/show_bug.cgi?id=36834 if os.name == "nt": def al_map_rgba_f(r, g, b, a): return ALLEGRO_COLOR(r, g, b, a) def al_map_rgb_f(r, g, b): return ALLEGRO_COLOR(r, g, b, 1) def al_map_rgba(r, g, b, a): return ALLEGRO_COLOR(r / 255.0, g / 255.0, b / 255.0, a / 255.0) def al_map_rgb(r, g, b): return ALLEGRO_COLOR(r / 255.0, g / 255.0, b / 255.0, 1) """) f.write(""" def al_main(real_main, *args): def python_callback(argc, argv): real_main(*args) return 0 cb = CFUNCTYPE(c_int, c_int, c_void_p)(python_callback) al_run_main(0, 0, cb); """) f.close() main()
[ [ [ 29, 32 ], [ 11543, 11546 ] ], [ [ 40, 42 ], [ 1614, 1616 ], [ 1669, 1671 ], [ 1717, 1719 ], [ 1766, 1768 ], [ 1819, 1821 ], [ 3829, 3831 ], [ 4480, 4482 ], [ 6784, 6786 ], [ 8717, 8719 ], [ 8938, 8940 ], [ 9137, 9139 ], [ 10242, 10244 ] ], [ [ 50, 58 ], [ 10862, 10870 ] ], [ [ 78, 79 ], [ 404, 413 ], [ 507, 512 ], [ 625, 631 ], [ 654, 660 ], [ 682, 688 ], [ 718, 725 ], [ 746, 751 ], [ 777, 783 ], [ 812, 818 ], [ 843, 850 ], [ 876, 884 ], [ 909, 916 ], [ 942, 950 ], [ 975, 982 ], [ 1008, 1016 ], [ 1043, 1051 ], [ 1077, 1085 ], [ 1109, 1115 ], [ 1145, 1152 ], [ 1174, 1180 ], [ 1204, 1212 ], [ 1235, 1242 ], [ 1266, 1273 ], [ 1298, 1306 ], [ 1329, 1336 ], [ 1360, 1368 ], [ 1394, 1399 ], [ 1421, 1429 ], [ 1573, 1581 ], [ 2037, 2044 ], [ 2079, 2087 ], [ 4244, 4252 ], [ 4418, 4426 ], [ 5041, 5046 ], [ 6441, 6446 ], [ 7060, 7068 ], [ 14717, 14726 ], [ 14739, 14744 ], [ 15915, 15922 ] ], [ [ 201, 215 ], [ 1452, 1466 ] ], [ [ 234, 241 ], [ 11438, 11445 ] ], [ [ 10846, 10850 ], [ 17296, 17300 ] ] ]
"""File for Google Cloud Storage.""" import logging import os import urllib.parse from pathlib import Path import aiohttp from aiofile import AIOFile from gcloud.aio.storage import Storage from google.cloud import storage from one_barangay.local_settings import logger async def async_upload_to_bucket( filepath: str, file_obj, gcs_path: str, ): """Upload files to bucket. Args: filepath: str: The path to the file to be uploaded. file_obj: The file object from reading a file gcs_path: str: The target bucket name and sub-folder in GCS to upload to. (e.g. documents/photo) Returns: The path to the uploaded file. """ async with aiohttp.ClientSession() as session: gcs_storage = Storage(session=session) # skipcq gcs_filename = filepath.split("/")[-1] await gcs_storage.upload(gcs_path, gcs_filename, file_obj) return f"https://storage.googleapis.com/{gcs_path}/{urllib.parse.quote(gcs_filename)}" async def upload_to_gcs_runner( filepath: str, gcs_path: str, ): """Call the 'async_upload_to_bucket'. Args: filepath: str: The path to the file to be uploaded. gcs_path: str: The target bucket name and sub-folder in GCS. Returns: The path to the uploaded file. """ # target_bucket_name = target_bucket_name # bucket_folder = bucket_folder try: async with AIOFile(filepath, mode="rb") as afp: f = await afp.read() path = await async_upload_to_bucket(filepath, f, gcs_path) return path except FileNotFoundError as e: logger.exception("File not found. Make sure the file exists. %s", e) except OSError as e: logger.exception("File not uploaded. %s", e) def download_from_gcs( filename: str, target_bucket_name: str, bucket_folder: str, ): """Download file from Google Cloud Storage bucket. Args: filename: str: The name of file being downloaded. target_bucket_name: str: The bucket name from which to download to. bucket_folder: str: The folder from the bucket name from which to download to. Returns: None. """ try: storage_client = storage.Client(os.getenv("GOOGLE_PROJECT_ID")) bucket_name = storage_client.get_bucket(target_bucket_name) bucket = storage_client.get_bucket(bucket_name) path = os.path.join(bucket_folder, filename) base_dir = Path(__file__).resolve().parent.parent # TODO: Change to user location destination = os.path.join(base_dir, filename) blob = bucket.blob(path) blob.download_to_filename(destination) logging.info("%s downloaded to %s.", filename, destination) except FileNotFoundError as e: logger.exception("File not found. Make sure the file exists. %s", e) except OSError as e: logger.exception("%s not downloaded. %s", filename, e) # if __name__ == "__main__": # Sample Calls to Uploading to GCS # asyncio.run( # upload_to_gcs_runner( # "<your_absolute_filepath>" # ) # ) # Sample Calls to Downloading from GCS # download_from_gcs( # "kath.png", # str(os.getenv("GS_MEDIA_BUCKET_NAME")), # str(os.getenv("FILE_BUCKET_FOLDER")), # )
[ [ [ 44, 51 ], [ 2701, 2708 ] ], [ [ 59, 61 ], [ 2255, 2257 ], [ 2426, 2428 ], [ 2579, 2581 ] ], [ [ 69, 81 ], [ 979, 985 ] ], [ [ 102, 106 ], [ 2484, 2488 ] ], [ [ 115, 122 ], [ 712, 719 ] ], [ [ 143, 150 ], [ 1436, 1443 ] ], [ [ 182, 189 ], [ 770, 777 ] ], [ [ 215, 222 ], [ 2240, 2247 ] ], [ [ 264, 270 ], [ 1644, 1650 ], [ 1746, 1752 ], [ 2804, 2810 ], [ 2906, 2912 ] ], [ [ 273, 1013 ], [ 1531, 1553 ] ], [ [ 1016, 1790 ] ], [ [ 1797, 1814 ] ] ]
##################################################### # Copyright (c) Xuanyi Dong [GitHub D-X-Y], 2021.04 # ##################################################### # python exps/LFNA/basic-same.py --srange 1-999 --env_version v1 --hidden_dim 16 # python exps/LFNA/basic-same.py --srange 1-999 --env_version v2 --hidden_dim ##################################################### import sys, time, copy, torch, random, argparse from tqdm import tqdm from copy import deepcopy from pathlib import Path lib_dir = (Path(__file__).parent / ".." / ".." / "lib").resolve() if str(lib_dir) not in sys.path: sys.path.insert(0, str(lib_dir)) from procedures import prepare_seed, prepare_logger, save_checkpoint, copy_checkpoint from log_utils import time_string from log_utils import AverageMeter, convert_secs2time from utils import split_str2indexes from procedures.advanced_main import basic_train_fn, basic_eval_fn from procedures.metric_utils import SaveMetric, MSEMetric, ComposeMetric from datasets.synthetic_core import get_synthetic_env from models.xcore import get_model from lfna_utils import lfna_setup def subsample(historical_x, historical_y, maxn=10000): total = historical_x.size(0) if total <= maxn: return historical_x, historical_y else: indexes = torch.randint(low=0, high=total, size=[maxn]) return historical_x[indexes], historical_y[indexes] def main(args): logger, env_info, model_kwargs = lfna_setup(args) # check indexes to be evaluated to_evaluate_indexes = split_str2indexes(args.srange, env_info["total"], None) logger.log( "Evaluate {:}, which has {:} timestamps in total.".format( args.srange, len(to_evaluate_indexes) ) ) w_container_per_epoch = dict() per_timestamp_time, start_time = AverageMeter(), time.time() for i, idx in enumerate(to_evaluate_indexes): need_time = "Time Left: {:}".format( convert_secs2time( per_timestamp_time.avg * (len(to_evaluate_indexes) - i), True ) ) logger.log( "[{:}]".format(time_string()) + " [{:04d}/{:04d}][{:04d}]".format(i, len(to_evaluate_indexes), idx) + " " + need_time ) # train the same data historical_x = env_info["{:}-x".format(idx)] historical_y = env_info["{:}-y".format(idx)] # build model model = get_model(dict(model_type="simple_mlp"), **model_kwargs) # build optimizer optimizer = torch.optim.Adam(model.parameters(), lr=args.init_lr, amsgrad=True) criterion = torch.nn.MSELoss() lr_scheduler = torch.optim.lr_scheduler.MultiStepLR( optimizer, milestones=[ int(args.epochs * 0.25), int(args.epochs * 0.5), int(args.epochs * 0.75), ], gamma=0.3, ) train_metric = MSEMetric() best_loss, best_param = None, None for _iepoch in range(args.epochs): preds = model(historical_x) optimizer.zero_grad() loss = criterion(preds, historical_y) loss.backward() optimizer.step() lr_scheduler.step() # save best if best_loss is None or best_loss > loss.item(): best_loss = loss.item() best_param = copy.deepcopy(model.state_dict()) model.load_state_dict(best_param) with torch.no_grad(): train_metric(preds, historical_y) train_results = train_metric.get_info() metric = ComposeMetric(MSEMetric(), SaveMetric()) eval_dataset = torch.utils.data.TensorDataset( env_info["{:}-x".format(idx)], env_info["{:}-y".format(idx)] ) eval_loader = torch.utils.data.DataLoader( eval_dataset, batch_size=args.batch_size, shuffle=False, num_workers=0 ) results = basic_eval_fn(eval_loader, model, metric, logger) log_str = ( "[{:}]".format(time_string()) + " [{:04d}/{:04d}]".format(idx, env_info["total"]) + " train-mse: {:.5f}, eval-mse: {:.5f}".format( train_results["mse"], results["mse"] ) ) 
logger.log(log_str) save_path = logger.path(None) / "{:04d}-{:04d}.pth".format( idx, env_info["total"] ) w_container_per_epoch[idx] = model.get_w_container().no_grad_clone() save_checkpoint( { "model_state_dict": model.state_dict(), "model": model, "index": idx, "timestamp": env_info["{:}-timestamp".format(idx)], }, save_path, logger, ) logger.log("") per_timestamp_time.update(time.time() - start_time) start_time = time.time() save_checkpoint( {"w_container_per_epoch": w_container_per_epoch}, logger.path(None) / "final-ckp.pth", logger, ) logger.log("-" * 200 + "\n") logger.close() if __name__ == "__main__": parser = argparse.ArgumentParser("Use the data in the past.") parser.add_argument( "--save_dir", type=str, default="./outputs/lfna-synthetic/use-same-timestamp", help="The checkpoint directory.", ) parser.add_argument( "--env_version", type=str, required=True, help="The synthetic enviornment version.", ) parser.add_argument( "--hidden_dim", type=int, required=True, help="The hidden dimension.", ) parser.add_argument( "--init_lr", type=float, default=0.1, help="The initial learning rate for the optimizer (default is Adam)", ) parser.add_argument( "--batch_size", type=int, default=512, help="The batch size", ) parser.add_argument( "--epochs", type=int, default=1000, help="The total number of epochs.", ) parser.add_argument( "--srange", type=str, required=True, help="The range of models to be evaluated" ) parser.add_argument( "--workers", type=int, default=4, help="The number of data loading workers (default: 4)", ) # Random Seed parser.add_argument("--rand_seed", type=int, default=-1, help="manual seed") args = parser.parse_args() if args.rand_seed is None or args.rand_seed < 0: args.rand_seed = random.randint(1, 100000) assert args.save_dir is not None, "The save dir argument can not be None" args.save_dir = "{:}-{:}-d{:}".format( args.save_dir, args.env_version, args.hidden_dim ) main(args)
[ [ [ 382, 385 ], [ 586, 589 ], [ 600, 603 ] ], [ [ 387, 391 ], [ 1827, 1831 ], [ 4857, 4861 ], [ 4904, 4908 ] ], [ [ 393, 397 ], [ 3415, 3419 ] ], [ [ 399, 404 ], [ 1291, 1296 ], [ 2541, 2546 ], [ 2629, 2634 ], [ 2671, 2676 ], [ 3504, 3509 ], [ 3697, 3702 ], [ 3834, 3839 ] ], [ [ 406, 412 ], [ 6580, 6586 ] ], [ [ 414, 422 ], [ 5158, 5166 ] ], [ [ 440, 444 ] ], [ [ 462, 470 ] ], [ [ 491, 495 ], [ 508, 512 ] ], [ [ 497, 504 ], [ 570, 577 ], [ 623, 630 ] ], [ [ 656, 668 ] ], [ [ 670, 684 ] ], [ [ 686, 701 ], [ 4515, 4530 ], [ 4921, 4936 ] ], [ [ 703, 718 ] ], [ [ 741, 752 ], [ 2115, 2126 ], [ 4071, 4082 ] ], [ [ 775, 787 ], [ 1811, 1823 ] ], [ [ 789, 806 ], [ 1947, 1964 ] ], [ [ 826, 843 ], [ 1532, 1549 ] ], [ [ 882, 896 ] ], [ [ 898, 911 ], [ 3974, 3987 ] ], [ [ 948, 958 ], [ 3660, 3670 ] ], [ [ 960, 969 ], [ 2950, 2959 ], [ 3647, 3656 ] ], [ [ 971, 984 ], [ 3633, 3646 ] ], [ [ 1021, 1038 ] ], [ [ 1064, 1073 ], [ 2438, 2447 ] ], [ [ 1098, 1108 ], [ 1452, 1462 ] ], [ [ 1115, 1124 ] ], [ [ 1403, 1407 ], [ 6794, 6798 ] ], [ [ 5149, 5155 ], [ 5215, 5221 ], [ 5391, 5397 ], [ 5539, 5545 ], [ 5673, 5679 ], [ 5844, 5850 ], [ 5969, 5975 ], [ 6104, 6110 ], [ 6223, 6229 ], [ 6394, 6400 ], [ 6482, 6488 ] ], [ [ 6475, 6479 ], [ 6509, 6513 ], [ 6535, 6539 ], [ 6563, 6567 ], [ 6617, 6621 ], [ 6735, 6739 ], [ 6750, 6754 ], [ 6768, 6772 ], [ 6688, 6692 ], [ 6799, 6803 ] ] ]
'''
Given a collection of intervals, merge all overlapping intervals.

Example 1:

Input: intervals = [[1,3],[2,6],[8,10],[15,18]]
Output: [[1,6],[8,10],[15,18]]
Explanation: Since intervals [1,3] and [2,6] overlap, merge them into [1,6].

Example 2:

Input: intervals = [[1,4],[4,5]]
Output: [[1,5]]
Explanation: Intervals [1,4] and [4,5] are considered overlapping.

NOTE: input types have been changed on April 15, 2019. Please reset to default code definition to get new method signature.

Constraints:

intervals[i][0] <= intervals[i][1]
'''
from typing import List


class Solution:
    def merge(self, intervals: List[List[int]]) -> List[List[int]]:
        intervals = sorted(intervals, key=lambda x: x[0])
        output = []
        i = 0
        if len(intervals) <= 1:
            return intervals
        while i < len(intervals) - 1:
            tmp = intervals[i]
            while tmp[1] >= intervals[i + 1][0]:
                tmp[1] = max(tmp[1], intervals[i + 1][1])
                i += 1
                if i >= len(intervals) - 1:
                    break
            i += 1
            output.append(tmp)
        if i <= len(intervals) - 1:
            output.append(intervals[-1])
        return output
[ [ [ 555, 563 ] ] ]
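A quick usage sketch for the Solution.merge implementation above, assuming it lives in the same module and that List is imported from typing as shown; the expected results mirror the two examples quoted in the docstring.

# Hypothetical sanity check for Solution.merge, not part of the original snippet.
if __name__ == "__main__":
    solver = Solution()
    print(solver.merge([[1, 3], [2, 6], [8, 10], [15, 18]]))  # expected: [[1, 6], [8, 10], [15, 18]]
    print(solver.merge([[1, 4], [4, 5]]))                      # expected: [[1, 5]]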
from rest_framework import viewsets

from periodic_tasks_api.models import CustomExtendedPeriodicTask
from periodic_tasks_api.serializers import PeriodicTaskSerializer
from periodic_tasks_api.filters import PeriodicTaskFilterSet


class PeriodicTaskView(viewsets.ModelViewSet):
    queryset = CustomExtendedPeriodicTask.objects.all()
    serializer_class = PeriodicTaskSerializer
    filter_backends = [PeriodicTaskFilterSet]
[ [ [ 27, 35 ], [ 254, 262 ] ], [ [ 75, 101 ], [ 293, 319 ] ], [ [ 145, 167 ], [ 357, 379 ] ], [ [ 207, 228 ], [ 403, 424 ] ], [ [ 237, 253 ] ] ]
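A minimal sketch of how a DRF viewset such as PeriodicTaskView is typically exposed through a router; the module path, route prefix, and basename below are illustrative assumptions rather than details taken from this project.

# urls.py (hypothetical wiring, assuming PeriodicTaskView is importable as shown)
from rest_framework.routers import DefaultRouter

from periodic_tasks_api.views import PeriodicTaskView  # assumed module path

router = DefaultRouter()
router.register(r"periodic-tasks", PeriodicTaskView, basename="periodic-task")

urlpatterns = router.urls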
#!/usr/bin/python3 # coding: utf-8 from network.group import Group import paho.mqtt.client as mqtt from threading import Thread import time from log import logger import paho.mqtt.subscribe as subscribe import json import random import string class Switch(Thread): def __init__(self, broker_ip): Thread.__init__(self) self.broker_ip = broker_ip self.groups = {} self.drivers = { "leds" : {}, "sensors": {}, "blinds": {} } self.diagnostic = { "config": {}, "events": {} } self.name = "Switch" + ''.join(random.choice(string.ascii_uppercase + string.digits) for _ in range(12)) def on_disconnect(self, client, userdata, rc): if rc != 0: logger.warning("Unexpected client disconnect for %r, will reconnect", self.name) def run(self): self.client = mqtt.Client(self.name) self.client.on_message = self.event_received self.client.on_disconnect = self.on_disconnect self.client.connect(self.broker_ip) self.client.loop_start() subscribe.callback(self.event_received, "#", hostname=self.broker_ip) while self.is_alive: time.sleep(1) self.client.loop_stop() def event_received(self, client, userdata, message): try: data = message.payload.decode("utf-8") logger.debug("received url %r %r", message.topic, str(data)) if message.topic.endswith("/setup/hello"): data = json.loads(data) topic_url = data["topic"] + "/setup/config" config = {} if data["type"] == "led": config["iMax"] = 700 self.client.publish("/write/" + topic_url, json.dumps(config)) except: logger.exception("Invalid value received") def create_group(self, leds, sensors, blinds, group_id): if group_id in self.groups: return False group = Group(self.broker_ip, group_id) self.groups[group_id] = group for led in leds: group.add_led(led) for sensor in sensors: group.add_sensor(sensor) for blind in blinds: group.add_blind(blind) group.start() self.diagnostic['events'][time.time()] = "Group " + str(group_id) + "has been created and contains " + json.dumps(group.serialize()) return True def add_driver_to_group(self, group_id, driver_type, mac): if group_id not in self.groups: return False group = self.groups[group_id] if driver_type == "led": led = self.get_led(mac) if not led: return False return group.add_led(led) elif driver_type == "sensor": sensor = self.get_sensor(mac) if not sensor: return False return group.add_sensor(sensor) elif driver_type == "blind": blind = self.get_blind(mac) if not blind: return False return group.add_blind(blind) self.diagnostic['events'][time.time()] = "Driver " + driver_type + " : " + mac + "has been been added to " + group_id return False def get_group_id(self, group_id): if group_id in self.groups: return self.groups[group_id] return {} def list_groups(self): return self.groups.values() def update_group_rules(self, group_id, rule_id, value): if group_id not in self.groups: return False if rule_id == "brightness": self.groups[group_id].set_brightness(value) elif rule_id == "temperature": self.groups[group_id].set_temperature(value) elif rule_id == "presence": self.groups[group_id].set_presence(value) self.diagnostic['events'][time.time()] = "Rule " + rule_id + " is set to " + str(value) + " for " + str(group_id) return True def list_leds(self): return self.drivers["leds"].values() def get_led(self, led_id): if led_id in self.drivers["leds"]: return self.drivers["leds"][led_id] return None def plug_led(self, led): self.drivers["leds"][led.mac] = led self.diagnostic['events'][time.time()] = "New led " + led.mac + " has been plugged into the switch" def unplug_led(self, led): if led.mac in self.drivers["leds"]: del 
self.drivers["leds"][led.mac] self.diagnostic['events'][time.time()] = "Led " + led.mac + " has been unplugged from the switch" def list_sensors(self): return self.drivers["sensors"].values() def get_sensor(self, sensor_id): if sensor_id in self.drivers["sensors"]: return self.drivers["sensors"][sensor_id] return None def plug_sensor(self, sensor): self.drivers["sensors"][sensor.mac] = sensor self.diagnostic['events'][time.time()] = "New sensor " + sensor.mac + " has been plugged into the switch" def unplug_sensor(self, sensor): if sensor.mac in self.drivers["sensors"]: del self.drivers["sensors"][sensor.mac] self.diagnostic['events'][time.time()] = "Sensor " + sensor.mac + " has been unplugged from the switch" def switch_led_mode(self, led_id, auto=True): if led_id not in self.drivers["leds"]: return False led = self.drivers["leds"][led_id] url = "/write/" + led.base_topic + "/status/auto" logger.info("Send switch mode to %r for %r", auto, url) status = "auto" if not auto: status = "manual" self.diagnostic['events'][time.time()] = "Switch led " + led.mac + " into mode " + status self.client.publish(url, "%s" % auto) return True def list_blinds(self): return self.drivers["blinds"].values() def get_blind(self, blind_id): if blind_id in self.drivers["blinds"]: return self.drivers["blinds"][blind_id] return None def plug_blind(self, blind): self.drivers["blinds"][blind.mac] = blind self.diagnostic['events'][time.time()] = "New blind " + blind.mac + " has been plugged into the switch" def unplug_blind(self, blind): if blind.mac in self.drivers["blinds"]: del self.drivers["blinds"][blind.mac] self.diagnostic['events'][time.time()] = "Blind " + blind.mac + " has been unplugged from the switch" def get_diagnostic(self): self.diagnostic["config"]["groups"] = [group.serialize() for group in self.groups.values()] return self.diagnostic def set_manual_led_brightness(self, led_id, brightness=0): if led_id not in self.drivers["leds"]: return False led = self.drivers["leds"][led_id] url = "/write/" + led.base_topic + "/base/setpointManual" logger.info("Send setpoint to %r for %r", brightness, url) self.diagnostic['events'][time.time()] = "Force led " + led.mac + " brightness " + str(brightness) logger.info(" back %r", self.client.publish(url, str(brightness))) return True def switch_blind_mode(self, blind_id, auto=True): if blind_id not in self.drivers["blinds"]: return False blind = self.drivers["blinds"][blind_id] url = "/write/" + blind.base_topic + "/status/auto" logger.info("Send switch mode to %r for %r", auto, url) status = "auto" if not auto: status = "manual" self.diagnostic['events'][time.time()] = "Switch blind " + blind.mac + " into mode " + status self.client.publish(url, "%s" % auto) return True def set_manual_blind_position(self, blind_id, position, blind_number=0): if blind_id not in self.drivers["blinds"]: return False blind = self.drivers["blinds"][blind_id] if not blind_number or blind_number == 1: url = "/write/" + blind.base_topic + "/base/blind1Manual" logger.info("Send position to %r for %r", position, url) self.diagnostic['events'][time.time()] = "Force blind " + blind.mac + " position " + str(position) self.client.publish(url, str(position)) if not blind_number or blind_number == 2: url = "/write/" + blind.base_topic + "/base/blind2Manual" logger.info("Send position to %r for %r", position, url) self.diagnostic['events'][time.time()] = "Force blind " + blind.mac + " position " + str(position) self.client.publish(url, str(position)) def set_manual_blind_fin(self, blind_id, fin, 
blind_number=0): if blind_id not in self.drivers["blinds"]: return False blind = self.drivers["blinds"][blind_id] if not blind_number or blind_number == 1: url = "/write/" + blind.base_topic + "/base/fin1Manual" logger.info("Send position to %r for %r", fin, url) self.diagnostic['events'][time.time()] = "Force blind " + blind.mac + " fin " + str(fin) self.client.publish(url, str(fin)) if not blind_number or blind_number == 2: url = "/write/" + blind.base_topic + "/base/fin2Manual" logger.info("Send position to %r for %r", fin, url) self.diagnostic['events'][time.time()] = "Force blind " + blind.mac + " fin " + str(fin) self.client.publish(url, str(fin)) def switch_group_mode(self, group_id, auto=True): if group_id not in self.groups: return False group = self.groups[group_id] url = "/write/" + group.base_topic + "/status/auto" logger.info("Send switch mode to %r for %r", auto, url) status = "auto" if not auto: status = "manual" self.diagnostic['events'][time.time()] = "Switch group " + str(group.group_id) + " into mode " + str(status) self.client.publish(url, "%s" % auto) return True def set_group_setpoint(self, group_id, setpoint): if group_id not in self.groups: return False group = self.groups[group_id] url = "/write/" + group.base_topic + "/config/setpoint" logger.info("Send setpoint value to %r for %r", setpoint, url) self.diagnostic['events'][time.time()] = "Send setpoint " + str(setpoint) + " to group " + str(group.group_id) self.client.publish(url, str(setpoint)) return True def set_group_blind_position(self, group_id, position): if group_id not in self.groups: return False group = self.groups[group_id] url = "/write/" + group.base_topic + "/config/blindPosition" logger.info("Send setpoint value to %r for %r", position, url) self.diagnostic['events'][time.time()] = "Send blind position " + str(position) + " to group " + str(group.group_id) self.client.publish(url, str(position)) return True
[ [ [ 62, 67 ], [ 2041, 2046 ] ], [ [ 76, 100 ], [ 915, 919 ] ], [ [ 123, 129 ], [ 259, 265 ], [ 312, 318 ] ], [ [ 137, 141 ], [ 1243, 1247 ], [ 2359, 2363 ], [ 3203, 3207 ], [ 3952, 3956 ], [ 4382, 4386 ], [ 4612, 4616 ], [ 5045, 5049 ], [ 5299, 5303 ], [ 5774, 5778 ], [ 6253, 6257 ], [ 6499, 6503 ], [ 7083, 7087 ], [ 7664, 7668 ], [ 8228, 8232 ], [ 8580, 8584 ], [ 9118, 9122 ], [ 9448, 9452 ], [ 9949, 9953 ], [ 10425, 10429 ], [ 10916, 10920 ] ], [ [ 158, 164 ], [ 792, 798 ], [ 1423, 1429 ], [ 1858, 1864 ], [ 5609, 5615 ], [ 6990, 6996 ], [ 7164, 7170 ], [ 7499, 7505 ], [ 8133, 8139 ], [ 8485, 8491 ], [ 9028, 9034 ], [ 9358, 9364 ], [ 9784, 9790 ], [ 10328, 10334 ], [ 10819, 10825 ] ], [ [ 172, 204 ], [ 1131, 1140 ] ], [ [ 212, 216 ], [ 1563, 1567 ], [ 1810, 1814 ], [ 2436, 2440 ] ], [ [ 224, 230 ], [ 634, 640 ] ], [ [ 238, 244 ], [ 648, 654 ], [ 673, 679 ] ], [ [ 252, 258 ] ] ]
# coding=utf-8 ######################################################################################################################## ### Do not forget to adjust the following variables to your own plugin. # The plugin's identifier, has to be unique plugin_identifier = "bedlevelvisualizer" # The plugin's python package, should be "octoprint_<plugin identifier>", has to be unique plugin_package = "octoprint_bedlevelvisualizer" # The plugin's human readable name. Can be overwritten within OctoPrint's internal data via __plugin_name__ in the # plugin module plugin_name = "Bed Visualizer" # The plugin's version. Can be overwritten within OctoPrint's internal data via __plugin_version__ in the plugin module plugin_version = "0.1.15" # The plugin's description. Can be overwritten within OctoPrint's internal data via __plugin_description__ in the plugin # module plugin_description = """Displays 3D mesh of bed topography report.""" # The plugin's author. Can be overwritten within OctoPrint's internal data via __plugin_author__ in the plugin module plugin_author = "jneilliii" # The plugin's author's mail address. plugin_author_email = "jneilliii+octoprint@gmail.com" # The plugin's homepage URL. Can be overwritten within OctoPrint's internal data via __plugin_url__ in the plugin module plugin_url = "https://github.com/jneilliii/OctoPrint-BedLevelVisualizer" # The plugin's license. Can be overwritten within OctoPrint's internal data via __plugin_license__ in the plugin module plugin_license = "AGPLv3" # Any additional requirements besides OctoPrint should be listed here plugin_requires = ["numpy>=1.16.0,<=1.19.2"] ### -------------------------------------------------------------------------------------------------------------------- ### More advanced options that you usually shouldn't have to touch follow after this point ### -------------------------------------------------------------------------------------------------------------------- # Additional package data to install for this plugin. The subfolders "templates", "static" and "translations" will # already be installed automatically if they exist. plugin_additional_data = [] # Any additional python packages you need to install with your plugin that are not contains in <plugin_package>.* plugin_addtional_packages = [] # Any python packages within <plugin_package>.* you do NOT want to install with your plugin plugin_ignored_packages = [] # Additional parameters for the call to setuptools.setup. If your plugin wants to register additional entry points, # define dependency links or other things like that, this is the place to go. Will be merged recursively with the # default setup parameters as provided by octoprint_setuptools.create_plugin_setup_parameters using # octoprint.util.dict_merge. 
# # Example: # plugin_requires = ["someDependency==dev"] # additional_setup_parameters = {"dependency_links": ["https://github.com/someUser/someRepo/archive/master.zip#egg=someDependency-dev"]} additional_setup_parameters = {} ######################################################################################################################## from setuptools import setup try: import octoprint_setuptools except: print("Could not import OctoPrint's setuptools, are you sure you are running that under " "the same python installation that OctoPrint is installed under?") import sys sys.exit(-1) setup_parameters = octoprint_setuptools.create_plugin_setup_parameters( identifier=plugin_identifier, package=plugin_package, name=plugin_name, version=plugin_version, description=plugin_description, author=plugin_author, mail=plugin_author_email, url=plugin_url, license=plugin_license, requires=plugin_requires, additional_packages=plugin_addtional_packages, ignored_packages=plugin_ignored_packages, additional_data=plugin_additional_data ) if len(additional_setup_parameters): from octoprint.util import dict_merge setup_parameters = dict_merge(setup_parameters, additional_setup_parameters) setup(**setup_parameters)
[ [ [ 254, 271 ], [ 3508, 3525 ] ], [ [ 387, 401 ], [ 3536, 3550 ] ], [ [ 567, 578 ], [ 3558, 3569 ] ], [ [ 719, 733 ], [ 3580, 3594 ] ], [ [ 876, 894 ], [ 3609, 3627 ] ], [ [ 1065, 1078 ], [ 3637, 3650 ] ], [ [ 1132, 1151 ], [ 3658, 3677 ] ], [ [ 1308, 1318 ], [ 3684, 3694 ] ], [ [ 1502, 1516 ], [ 3705, 3719 ] ], [ [ 1599, 1614 ], [ 3731, 3746 ] ], [ [ 2146, 2168 ], [ 3856, 3878 ] ], [ [ 2289, 2314 ], [ 3769, 3794 ] ], [ [ 2413, 2436 ], [ 3814, 3837 ] ], [ [ 3004, 3031 ], [ 3889, 3916 ], [ 4007, 4034 ] ], [ [ 3183, 3188 ], [ 4037, 4042 ] ], [ [ 3203, 3223 ], [ 3443, 3463 ] ], [ [ 3405, 3408 ], [ 3410, 3413 ] ], [ [ 3424, 3440 ], [ 3989, 4005 ], [ 4045, 4061 ] ], [ [ 3947, 3957 ], [ 3978, 3988 ] ], [ [ 3959, 3975 ], [ 4045, 4061 ] ] ]
#!/usr/bin/env python3 # Automatically generated file by swagger_to. DO NOT EDIT OR APPEND ANYTHING! """Implements the client for test.""" # pylint: skip-file # pydocstyle: add-ignore=D105,D107,D401 import contextlib import json from typing import Any, BinaryIO, Dict, List, MutableMapping, Optional import requests import requests.auth class RemoteCaller: """Executes the remote calls to the server.""" def __init__(self, url_prefix: str, auth: Optional[requests.auth.AuthBase] = None) -> None: self.url_prefix = url_prefix self.auth = auth def test_me( self, query_some_parameter: str, path_some_parameter: str) -> bytes: """ Is a test endpoint. :param query_some_parameter: :param path_some_parameter: :return: a confirmation """ url = "".join([ self.url_prefix, '/products/', str(path_some_parameter)]) params = {} # type: Dict[str, str] params['some_parameter'] = query_some_parameter resp = requests.request( method='get', url=url, params=params, auth=self.auth) with contextlib.closing(resp): resp.raise_for_status() return resp.content # Automatically generated file by swagger_to. DO NOT EDIT OR APPEND ANYTHING!
[ [ [ 208, 218 ], [ 1224, 1234 ] ], [ [ 226, 230 ] ], [ [ 250, 253 ] ], [ [ 255, 263 ] ], [ [ 265, 269 ] ], [ [ 271, 275 ] ], [ [ 277, 291 ] ], [ [ 293, 301 ], [ 460, 468 ] ], [ [ 310, 318 ] ], [ [ 326, 339 ], [ 469, 477 ], [ 1090, 1098 ] ], [ [ 348, 360 ] ] ]
# Copyright 2021 Research Institute of Systems Planning, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from functools import cached_property from logging import getLogger from typing import Dict, List, Optional, Sequence, Tuple, Union from caret_analyze.value_objects.message_context import MessageContext, MessageContextType from .lttng import Lttng from .value_objects import PublisherValueLttng, SubscriptionCallbackValueLttng from ...common import Columns, Util from ...exceptions import (InvalidArgumentError, UnsupportedNodeRecordsError, UnsupportedTypeError) from ...infra.interface import RuntimeDataProvider from ...infra.lttng.column_names import COLUMN_NAME from ...record import (merge, merge_sequencial, RecordsFactory, RecordsInterface) from ...value_objects import (CallbackChain, CallbackStructValue, CommunicationStructValue, InheritUniqueStamp, NodePathStructValue, PublisherStructValue, Qos, SubscriptionCallbackStructValue, SubscriptionStructValue, Tilde, TimerCallbackStructValue, UseLatestMessage, VariablePassingStructValue) logger = getLogger(__name__) class RecordsProviderLttng(RuntimeDataProvider): """ Records are processed and measurement results are calculated. In addition to merging, filtering and other operations are performed here. """ def __init__( self, lttng: Lttng ) -> None: self._lttng = lttng self._source = FilteredRecordsSource(lttng) self._helper = RecordsProviderLttngHelper(lttng) def communication_records( self, comm_val: CommunicationStructValue ) -> RecordsInterface: """ Provide communication records. Parameters ---------- comm_info : CommunicationStructInfo communicadtion info. Returns ------- RecordsInterface Columns [inter process communication case]: - [topic_name]/rclcpp_publish_timestamp - [topic_name]/rcl_publish_timestamp - [topic_name]/dds_publish_timestamp - [callback_name]/callback_start_timestamp Columns [intra process communication case]: - [topic_name]/rclcpp_intra_publish_timestamp - [topic_name]/message_timestamp - [callback_name]/callback_start_timestamp """ assert comm_val.subscribe_callback_name is not None if self.is_intra_process_communication(comm_val): return self._compose_intra_proc_comm_records(comm_val) return self._compose_inter_proc_comm_records(comm_val) def node_records( self, node_path_val: NodePathStructValue, ) -> RecordsInterface: if node_path_val.message_context is None: # dummy record msg = 'message context is None. return dummy record. 
' msg += f'node_name: {node_path_val.node_name}' logger.info(msg) return RecordsFactory.create_instance() if node_path_val.message_context_type == MessageContextType.CALLBACK_CHAIN: return NodeRecordsCallbackChain(self, node_path_val).to_records() if node_path_val.message_context_type == MessageContextType.INHERIT_UNIQUE_STAMP: return NodeRecordsInheritUniqueTimestamp(self, node_path_val).to_records() if node_path_val.message_context_type == MessageContextType.USE_LATEST_MESSAGE: return NodeRecordsUseLatestMessage(self, node_path_val).to_records() if node_path_val.message_context_type == MessageContextType.TILDE: return NodeRecordsTilde(self, node_path_val).to_records() raise UnsupportedNodeRecordsError( 'Unknown message context. ' f'message_context = {node_path_val.message_context.context_type.type_name}' ) def callback_records( self, callback: CallbackStructValue ) -> RecordsInterface: """ Return callback duration records. Parameters ---------- callback_val : CallbackStructValue target callback value. Returns ------- RecordsInterface Columns - [callback_name]/callback_start_timestamp - [callback_name]/callback_end_timestamp """ callback_objects = self._helper.get_callback_objects(callback) callback_records = self._source.callback_records(*callback_objects) columns = [ COLUMN_NAME.CALLBACK_START_TIMESTAMP, COLUMN_NAME.CALLBACK_END_TIMESTAMP ] self._format(callback_records, columns) self._rename_column(callback_records, callback.callback_name, None) return callback_records def subscribe_records( self, subscription: SubscriptionStructValue ) -> RecordsInterface: """ Provide subscription records. Parameters ---------- subscription_value : SubscriptionStructValue Target subscription value. Returns ------- RecordsInterface Columns - [callback_name]/callback_start_timestamp - [topic_name]/message_timestamp - [topic_name]/source_timestamp Raises ------ InvalidArgumentError """ callback = subscription.callback assert callback is not None tilde_subscription = self._helper.get_tilde_subscription(callback) if tilde_subscription is None: return self._subscribe_records(subscription) return self._subscribe_records_with_tilde(subscription) def _subscribe_records( self, subscription: SubscriptionStructValue ) -> RecordsInterface: """ Provide subscription records. Parameters ---------- subscription_value : SubscriptionStructValue Target subscription value. Returns ------- RecordsInterface Columns - [callback_name]/callback_start_timestamp - [topic_name]/message_timestamp - [topic_name]/source_timestamp Raises ------ InvalidArgumentError """ callback = subscription.callback if callback is None: raise InvalidArgumentError( 'callback_value is None. ' f'node_name: {subscription.node_name}' f'callback_name: {subscription.callback_name}' f'topic_name: {subscription.topic_name}' ) callback_objects = self._helper.get_subscription_callback_objects(callback) sub_records = self._source.sub_records(*callback_objects) columns = [ COLUMN_NAME.CALLBACK_START_TIMESTAMP, COLUMN_NAME.MESSAGE_TIMESTAMP, COLUMN_NAME.SOURCE_TIMESTAMP, ] self._format(sub_records, columns) self._rename_column( sub_records, callback.callback_name, subscription.topic_name ) return sub_records def _subscribe_records_with_tilde( self, subscription: SubscriptionStructValue ) -> RecordsInterface: """ Provide subscription records. Parameters ---------- subscription_value : SubscriptionStructValue Target subscription value. 
Returns ------- RecordsInterface Columns - [callback_name]/callback_start_timestamp - [topic_name]/message_timestamp - [topic_name]/source_timestamp - [topic_name]/tilde_subscribe_timestamp - [topic_name]/tilde_message_id Raises ------ InvalidArgumentError """ callback = subscription.callback if callback is None: raise InvalidArgumentError( 'callback_value is None. ' f'node_name: {subscription.node_name}' f'callback_name: {subscription.callback_name}' f'topic_name: {subscription.topic_name}' ) callback_objects = self._helper.get_subscription_callback_objects(callback) sub_records = self._source.sub_records(*callback_objects) tilde_subscription = self._helper.get_tilde_subscription(callback) if tilde_subscription is not None: tilde_records = self._source.tilde_subscribe_records(tilde_subscription) sub_records = merge_sequencial( left_records=sub_records, right_records=tilde_records, left_stamp_key=COLUMN_NAME.CALLBACK_START_TIMESTAMP, right_stamp_key=COLUMN_NAME.TILDE_SUBSCRIBE_TIMESTAMP, join_left_key=None, join_right_key=None, how='left', columns=Columns(sub_records.columns + tilde_records.columns).as_list(), progress_label='binding: tilde_records', ) columns = [ COLUMN_NAME.CALLBACK_START_TIMESTAMP, COLUMN_NAME.MESSAGE_TIMESTAMP, COLUMN_NAME.SOURCE_TIMESTAMP, COLUMN_NAME.TILDE_SUBSCRIBE_TIMESTAMP, COLUMN_NAME.TILDE_MESSAGE_ID, ] self._format(sub_records, columns) self._rename_column( sub_records, callback.callback_name, subscription.topic_name ) return sub_records def _publish_records( self, publisher: PublisherStructValue ) -> RecordsInterface: """ Return publish records. Parameters ---------- publish : PublisherStructValue target publisher Returns ------- RecordsInterface Columns - [topic_name]/rclcpp_publish_timestamp - [topic_name]/rclcpp_intra_publish_timestamp - [topic_name]/rclcpp_inter_publish_timestamp - [topic_name]/rcl_publish_timestamp - [topic_name]/dds_write_timestamp - [topic_name]/message_timestamp - [topic_name]/source_timestamp """ publisher_handles = self._helper.get_publisher_handles(publisher) pub_records = self._source.publish_records(publisher_handles) columns = [ COLUMN_NAME.RCLCPP_PUBLISH_TIMESTAMP, COLUMN_NAME.RCLCPP_INTRA_PUBLISH_TIMESTAMP, COLUMN_NAME.RCLCPP_INTER_PUBLISH_TIMESTAMP, COLUMN_NAME.RCL_PUBLISH_TIMESTAMP, COLUMN_NAME.DDS_WRITE_TIMESTAMP, COLUMN_NAME.MESSAGE_TIMESTAMP, COLUMN_NAME.SOURCE_TIMESTAMP, ] self._format(pub_records, columns) self._rename_column(pub_records, None, publisher.topic_name) return pub_records def publish_records( self, publisher: PublisherStructValue ) -> RecordsInterface: """ Return publish records. Parameters ---------- publish : PublisherStructValue target publisher Returns ------- RecordsInterface Columns - [topic_name]/rclcpp_publish_timestamp - [topic_name]/rclcpp_intra_publish_timestamp - [topic_name]/rclcpp_inter_publish_timestamp - [topic_name]/rcl_publish_timestamp - [topic_name]/dds_write_timestamp - [topic_name]/message_timestamp - [topic_name]/source_timestamp --- - [topic_name]/tilde_publish_timestamp - [topic_name]/tilde_message_id """ tilde_publishers = self._helper.get_tilde_publishers(publisher) if len(tilde_publishers) == 0: return self._publish_records(publisher) return self._publish_records_with_tilde(publisher) def _publish_records_with_tilde( self, publisher: PublisherStructValue ) -> RecordsInterface: """ Return publish records. 
Parameters ---------- publish : PublisherStructValue target publisher Returns ------- RecordsInterface Columns - [topic_name]/rclcpp_publish_timestamp - [topic_name]/rclcpp_intra_publish_timestamp - [topic_name]/rclcpp_inter_publish_timestamp - [topic_name]/rcl_publish_timestamp - [topic_name]/dds_write_timestamp - [topic_name]/message_timestamp - [topic_name]/source_timestamp - [topic_name]/tilde_publish_timestamp - [topic_name]/tilde_message_id """ publisher_handles = self._helper.get_publisher_handles(publisher) pub_records = self._source.publish_records(publisher_handles) tilde_publishers = self._helper.get_tilde_publishers(publisher) tilde_records = self._source.tilde_publish_records(tilde_publishers) pub_records = merge_sequencial( left_records=tilde_records, right_records=pub_records, left_stamp_key='tilde_publish_timestamp', right_stamp_key='rclcpp_publish_timestamp', join_left_key=None, join_right_key=None, columns=Columns(tilde_records.columns + pub_records.columns).as_list(), how='right', progress_label='binding: tilde_records', ) columns = [ COLUMN_NAME.RCLCPP_PUBLISH_TIMESTAMP, COLUMN_NAME.RCLCPP_INTRA_PUBLISH_TIMESTAMP, COLUMN_NAME.RCLCPP_INTER_PUBLISH_TIMESTAMP, COLUMN_NAME.RCL_PUBLISH_TIMESTAMP, COLUMN_NAME.DDS_WRITE_TIMESTAMP, COLUMN_NAME.MESSAGE_TIMESTAMP, COLUMN_NAME.SOURCE_TIMESTAMP, COLUMN_NAME.TILDE_PUBLISH_TIMESTAMP, COLUMN_NAME.TILDE_MESSAGE_ID, ] self._format(pub_records, columns) self._rename_column(pub_records, None, publisher.topic_name) return pub_records def tilde_records( self, subscription: SubscriptionStructValue, publisher: PublisherStructValue ) -> RecordsInterface: assert subscription.callback is not None publisher_addrs = self._helper.get_tilde_publishers(publisher) subscription_addr = self._helper.get_tilde_subscription(subscription.callback) assert len(publisher_addrs) > 0 assert subscription_addr is not None pub_records = self._source.tilde_publish_records(publisher_addrs) sub_records = self._source.tilde_subscribe_records(subscription_addr) records = merge( left_records=sub_records, right_records=pub_records, join_left_key=COLUMN_NAME.TILDE_MESSAGE_ID, join_right_key=COLUMN_NAME.TILDE_MESSAGE_ID, columns=Columns(sub_records.columns + pub_records.columns).as_list(), how='left', progress_label='binding: tilde pub and sub records' ) columns = [ COLUMN_NAME.TILDE_SUBSCRIBE_TIMESTAMP, COLUMN_NAME.TILDE_PUBLISH_TIMESTAMP ] self._format(records, columns) self._rename_column(records, subscription.callback_name, subscription.topic_name) return records def get_rmw_implementation(self) -> str: return self._lttng.get_rmw_impl() def get_qos( self, pub_sub: Union[PublisherStructValue, SubscriptionStructValue] ) -> Qos: if isinstance(pub_sub, SubscriptionStructValue): sub_cb = pub_sub.callback if sub_cb is None: raise InvalidArgumentError('Failed to get callback information.' 'pub.callback is None') sub_cb_lttng = self._helper.get_lttng_subscription(sub_cb) return self._lttng.get_subscription_qos(sub_cb_lttng) pubs_lttng = self._helper.get_lttng_publishers(pub_sub) if len(pubs_lttng) == 0: raise InvalidArgumentError('No publisher matching the criteria was found.') if len(pubs_lttng) > 1: logger.warning( 'Multiple publishers matching your criteria were found.' 'The value of the first publisher qos will be returned.') return self._lttng.get_publisher_qos(pubs_lttng[0]) def variable_passing_records( self, variable_passing_info: VariablePassingStructValue ) -> RecordsInterface: """ Return variable passing records. 
Parameters ---------- variable_passing_info : VariablePassingStructInfo target variable passing info. Returns ------- RecordsInterface Columns - [callback_name]/callback_end_timestamp - [callback_name]/callback_start_timestamp """ read_records: RecordsInterface = self.callback_records( variable_passing_info.callback_read) write_records: RecordsInterface = self.callback_records( variable_passing_info.callback_write) read_records.drop_columns([read_records.columns[-1]]) # callback end write_records.drop_columns([write_records.columns[0]]) # callback_start columns = [ write_records.columns[0], read_records.columns[0], ] merged_records = merge_sequencial( left_records=write_records, right_records=read_records, left_stamp_key=columns[0], right_stamp_key=columns[1], join_left_key=None, join_right_key=None, columns=columns, how='left_use_latest', progress_label='binding: callback_end and callback_start' ) merged_records.sort(columns[0]) self._format(merged_records, columns) return merged_records def is_intra_process_communication( self, communication_value: CommunicationStructValue ) -> Optional[bool]: intra_record = self._compose_intra_proc_comm_records(communication_value) return len(intra_record) > 0 def _compose_intra_proc_comm_records( self, comm_info: CommunicationStructValue, ) -> RecordsInterface: """ Compose intra process communication records. Parameters ---------- comm_info : CommunicationStructInfo Target communication info. Returns ------- RecordsInterface Columns - [topic_name]/rclcpp_publish_timestamp - [callback_name]/callback_start_timestamp """ publisher = comm_info.publisher subscription_cb = comm_info.subscribe_callback assert subscription_cb is not None assert isinstance(subscription_cb, SubscriptionCallbackStructValue) publisher_handles = self._helper.get_publisher_handles(publisher) callback_object_intra = self._helper.get_subscription_callback_object_intra( subscription_cb) records = self._source.intra_comm_records(publisher_handles, callback_object_intra) columns = [ COLUMN_NAME.RCLCPP_INTRA_PUBLISH_TIMESTAMP, COLUMN_NAME.CALLBACK_START_TIMESTAMP, ] self._format(records, columns) records.rename_columns({ COLUMN_NAME.RCLCPP_INTRA_PUBLISH_TIMESTAMP: COLUMN_NAME.RCLCPP_PUBLISH_TIMESTAMP }) self._rename_column(records, comm_info.subscribe_callback_name, comm_info.topic_name) return records def _compose_inter_proc_comm_records( self, comm_value: CommunicationStructValue ) -> RecordsInterface: """ Composer intar process communication records. Parameters ---------- comm_value : CommunicationStructValue target communication value. 
Returns ------- RecordsInterface Columns - [topic_name]/rclcpp_publish_timestamp - [topic_name]/rcl_publish_timestamp - [topic_name]/dds_write_timestamp - [callback_name_name]/callback_start_timestamp """ publisher = comm_value.publisher subscription_cb = comm_value.subscribe_callback assert subscription_cb is not None assert isinstance(subscription_cb, SubscriptionCallbackStructValue) publisher_handles = self._helper.get_publisher_handles(publisher) callback_object = self._helper.get_subscription_callback_object_inter(subscription_cb) records = self._source.inter_comm_records(publisher_handles, callback_object) columns = [ COLUMN_NAME.RCLCPP_INTER_PUBLISH_TIMESTAMP, COLUMN_NAME.RCL_PUBLISH_TIMESTAMP, COLUMN_NAME.DDS_WRITE_TIMESTAMP, COLUMN_NAME.CALLBACK_START_TIMESTAMP ] self._format(records, columns) records.rename_columns({ COLUMN_NAME.RCLCPP_INTER_PUBLISH_TIMESTAMP: COLUMN_NAME.RCLCPP_PUBLISH_TIMESTAMP }) self._rename_column(records, comm_value.subscribe_callback_name, comm_value.topic_name) return records @staticmethod def _format(records: RecordsInterface, columns: List[str]): drop = list(set(records.columns) - set(columns)) records.drop_columns(drop) records.reindex(columns) @staticmethod def _rename_column( records: RecordsInterface, callback_name: Optional[str], topic_name: Optional[str] ) -> None: rename_dict = {} if COLUMN_NAME.RCLCPP_PUBLISH_TIMESTAMP in records.columns: rename_dict[COLUMN_NAME.RCLCPP_PUBLISH_TIMESTAMP] = \ f'{topic_name}/{COLUMN_NAME.RCLCPP_PUBLISH_TIMESTAMP}' if COLUMN_NAME.RCLCPP_INTRA_PUBLISH_TIMESTAMP in records.columns: rename_dict[COLUMN_NAME.RCLCPP_INTRA_PUBLISH_TIMESTAMP] = \ f'{topic_name}/{COLUMN_NAME.RCLCPP_INTRA_PUBLISH_TIMESTAMP}' if COLUMN_NAME.CALLBACK_START_TIMESTAMP in records.columns: rename_dict[COLUMN_NAME.CALLBACK_START_TIMESTAMP] = \ f'{callback_name}/{COLUMN_NAME.CALLBACK_START_TIMESTAMP}' if COLUMN_NAME.CALLBACK_END_TIMESTAMP in records.columns: rename_dict[COLUMN_NAME.CALLBACK_END_TIMESTAMP] = \ f'{callback_name}/{COLUMN_NAME.CALLBACK_END_TIMESTAMP}' if COLUMN_NAME.RCLCPP_INTER_PUBLISH_TIMESTAMP in records.columns: rename_dict[COLUMN_NAME.RCLCPP_INTER_PUBLISH_TIMESTAMP] = \ f'{topic_name}/{COLUMN_NAME.RCLCPP_INTER_PUBLISH_TIMESTAMP}' if COLUMN_NAME.RCL_PUBLISH_TIMESTAMP in records.columns: rename_dict[COLUMN_NAME.RCL_PUBLISH_TIMESTAMP] = \ f'{topic_name}/{COLUMN_NAME.RCL_PUBLISH_TIMESTAMP}' if COLUMN_NAME.DDS_WRITE_TIMESTAMP in records.columns: rename_dict[COLUMN_NAME.DDS_WRITE_TIMESTAMP] = \ f'{topic_name}/{COLUMN_NAME.DDS_WRITE_TIMESTAMP}' if COLUMN_NAME.MESSAGE_TIMESTAMP in records.columns: rename_dict[COLUMN_NAME.MESSAGE_TIMESTAMP] = \ f'{topic_name}/{COLUMN_NAME.MESSAGE_TIMESTAMP}' if COLUMN_NAME.SOURCE_TIMESTAMP in records.columns: rename_dict[COLUMN_NAME.SOURCE_TIMESTAMP] = \ f'{topic_name}/{COLUMN_NAME.SOURCE_TIMESTAMP}' if COLUMN_NAME.TILDE_SUBSCRIBE_TIMESTAMP in records.columns: rename_dict[COLUMN_NAME.TILDE_SUBSCRIBE_TIMESTAMP] = \ f'{topic_name}/{COLUMN_NAME.TILDE_SUBSCRIBE_TIMESTAMP}' if COLUMN_NAME.TILDE_MESSAGE_ID in records.columns: rename_dict[COLUMN_NAME.TILDE_MESSAGE_ID] = \ f'{topic_name}/{COLUMN_NAME.TILDE_MESSAGE_ID}' if COLUMN_NAME.TILDE_PUBLISH_TIMESTAMP in records.columns: rename_dict[COLUMN_NAME.TILDE_PUBLISH_TIMESTAMP] = \ f'{topic_name}/{COLUMN_NAME.TILDE_PUBLISH_TIMESTAMP}' records.rename_columns(rename_dict) class RecordsProviderLttngHelper: def __init__( self, lttng: Lttng ) -> None: from .bridge import LttngBridge self._bridge = LttngBridge(lttng) def get_callback_objects( self, callback: 
CallbackStructValue ) -> Tuple[int, Optional[int]]: if isinstance(callback, TimerCallbackStructValue): return self.get_timer_callback_object(callback), None if isinstance(callback, SubscriptionCallbackStructValue): obj = self.get_subscription_callback_object_inter(callback) obj_intra = self.get_subscription_callback_object_intra(callback) if obj_intra is not None: return obj, obj_intra return obj, None msg = 'Failed to get callback object. ' msg += f'{callback.callback_type.type_name} is not supported.' raise UnsupportedTypeError(msg) def get_timer_callback_object( self, callback: TimerCallbackStructValue ) -> int: callback_lttng = self._bridge.get_timer_callback(callback) return callback_lttng.callback_object def get_subscription_callback_objects( self, callback: SubscriptionCallbackStructValue ) -> Tuple[int, Optional[int]]: return self.get_callback_objects(callback) def get_subscription_callback_object_inter( self, callback: SubscriptionCallbackStructValue ) -> int: callback_lttng = self._bridge.get_subscription_callback(callback) return callback_lttng.callback_object def get_subscription_callback_object_intra( self, callback: SubscriptionCallbackStructValue ) -> Optional[int]: callback_lttng = self._bridge.get_subscription_callback(callback) return callback_lttng.callback_object_intra def get_tilde_subscription( self, callback: SubscriptionCallbackStructValue ) -> Optional[int]: callback_lttng = self._bridge.get_subscription_callback(callback) return callback_lttng.tilde_subscription def get_publisher_handles( self, publisher: PublisherStructValue ) -> List[int]: publisher_lttng = self._bridge.get_publishers(publisher) return [pub_info.publisher_handle for pub_info in publisher_lttng] def get_tilde_publishers( self, publisher_info: PublisherStructValue ) -> List[int]: publisher_lttng = self._bridge.get_publishers(publisher_info) publisher = [pub_info.tilde_publisher for pub_info in publisher_lttng if pub_info.tilde_publisher is not None] return publisher def get_lttng_publishers( self, publisher: PublisherStructValue ) -> List[PublisherValueLttng]: return self._bridge.get_publishers(publisher) def get_lttng_subscription( self, callback: SubscriptionCallbackStructValue ) -> SubscriptionCallbackValueLttng: return self._bridge.get_subscription_callback(callback) class NodeRecordsCallbackChain: def __init__( self, provider: RecordsProviderLttng, node_path: NodePathStructValue, ) -> None: self._provider = provider self._validate(node_path) self._val = node_path def to_records(self): chain_info = self._val.child if isinstance(chain_info[0], CallbackStructValue): cb_info = chain_info[0] records = self._provider.callback_records(cb_info) else: var_pass_info = chain_info[0] records = self._provider.variable_passing_records(var_pass_info) for chain_element in chain_info[1:]: if isinstance(chain_element, CallbackStructValue): records_ = self._provider.callback_records(chain_element) join_key = records_.columns[0] records = merge( left_records=records, right_records=records_, join_left_key=join_key, join_right_key=join_key, columns=Columns(records.columns + records_.columns), how='left', progress_label='binding: callback_start and callback end' ) continue if isinstance(chain_element, VariablePassingStructValue): records_ = self._provider.variable_passing_records(chain_element) # self._rename_var_pass_records(records_, chain_element) join_key = records_.columns[0] records = merge( left_records=records, right_records=records_, join_left_key=join_key, join_right_key=join_key, 
columns=Columns(records.columns + records_.columns).as_list(), how='left', progress_label='binding: callback_end and callback start' ) continue last_element = chain_info[-1] if isinstance(last_element, CallbackStructValue) \ and self._val.publisher is not None: last_callback_end_name = Util.filter_items( lambda x: COLUMN_NAME.CALLBACK_END_TIMESTAMP in x, records.columns)[-1] records.drop_columns([last_callback_end_name]) last_callback_start_name = Util.filter_items( lambda x: COLUMN_NAME.CALLBACK_START_TIMESTAMP in x, records.columns)[-1] publish_records = self._provider.publish_records(self._val.publisher) publish_column = publish_records.columns[0] columns = records.columns + [publish_column] records = merge_sequencial( left_records=records, right_records=publish_records, join_left_key=None, join_right_key=None, left_stamp_key=last_callback_start_name, right_stamp_key=publish_column, columns=Columns(records.columns + publish_records.columns).as_list(), how='left', progress_label='binding: callback_start and publish', ) records.drop_columns(list(set(records.columns) - set(columns))) records.reindex(columns) return records @staticmethod def _validate( node_path: NodePathStructValue, ) -> None: if node_path.callbacks is None: raise UnsupportedNodeRecordsError('') if node_path.callbacks is None: raise UnsupportedNodeRecordsError('callback values is None.') if not isinstance(node_path.message_context, CallbackChain): msg = 'node_path.message context is not CallbackChain' raise UnsupportedNodeRecordsError(msg) head_callback = node_path.callbacks[0] tail_callback = node_path.callbacks[-1] if node_path.publish_topic_name is not None and \ tail_callback.publish_topic_names is not None and \ len(tail_callback.publish_topic_names) != 0 and \ node_path.publish_topic_name not in tail_callback.publish_topic_names: raise UnsupportedNodeRecordsError('') if node_path.subscribe_topic_name is not None and \ node_path.subscribe_topic_name != head_callback.subscribe_topic_name: raise UnsupportedNodeRecordsError('') class NodeRecordsInheritUniqueTimestamp: def __init__( self, provider: RecordsProviderLttng, node_path: NodePathStructValue, ) -> None: if node_path.message_context is None: raise UnsupportedNodeRecordsError('node_path.message context is None') if not isinstance(node_path.message_context, InheritUniqueStamp): msg = 'node_path.message context is not InheritUniqueStamp' raise UnsupportedNodeRecordsError(msg) self._provider = provider self._context: InheritUniqueStamp = node_path.message_context self._validate(node_path, self._context) self._node_path = node_path def to_records(self): sub_records = self._provider.subscribe_records(self._node_path.subscription) pub_records = self._provider.publish_records(self._node_path.publisher) columns = [ sub_records.columns[0], pub_records.columns[0], ] join_left_key = f'{self._node_path.subscribe_topic_name}/{COLUMN_NAME.MESSAGE_TIMESTAMP}' join_right_key = f'{self._node_path.publish_topic_name}/{COLUMN_NAME.MESSAGE_TIMESTAMP}' pub_sub_records = merge_sequencial( left_records=sub_records, right_records=pub_records, left_stamp_key=sub_records.columns[0], right_stamp_key=pub_records.columns[0], join_left_key=join_left_key, join_right_key=join_right_key, columns=Columns(sub_records.columns + pub_records.columns).as_list(), how='left_use_latest', progress_label='binding: inherit unique timestamp', ) drop_columns = list(set(pub_sub_records.columns) - set(columns)) pub_sub_records.drop_columns(drop_columns) pub_sub_records.reindex(columns) return pub_sub_records 
@staticmethod def _validate( node_path: NodePathStructValue, context: InheritUniqueStamp, ) -> None: def is_valid() -> bool: if context.publisher_topic_name != node_path.publish_topic_name: return False if context.subscription_topic_name != node_path.subscribe_topic_name: return False return True if is_valid(): return None msg = f'InheritUniqueStamp cannot build records. \n{node_path} \n{context}' raise UnsupportedNodeRecordsError(msg) class NodeRecordsUseLatestMessage: def __init__( self, provider: RecordsProviderLttng, node_path: NodePathStructValue, ) -> None: if node_path.message_context is None: raise UnsupportedNodeRecordsError('node_path.message context is None') if not isinstance(node_path.message_context, UseLatestMessage): raise UnsupportedNodeRecordsError('node_path.message context is not UseLatestMessage') self._provider = provider self._context: UseLatestMessage = node_path.message_context self._validate(node_path, self._context) self._node_path = node_path def to_records(self): sub_records = self._provider.subscribe_records(self._node_path.subscription) pub_records = self._provider.publish_records(self._node_path.publisher) columns = [ sub_records.columns[0], f'{self._node_path.publish_topic_name}/rclcpp_publish_timestamp', ] pub_sub_records = merge_sequencial( left_records=sub_records, right_records=pub_records, left_stamp_key=sub_records.columns[0], right_stamp_key=pub_records.columns[0], join_left_key=None, join_right_key=None, columns=Columns(sub_records.columns + pub_records.columns).as_list(), how='left_use_latest', progress_label='binding use_latest_message.' ) drop_columns = list(set(pub_sub_records.columns) - set(columns)) pub_sub_records.drop_columns(drop_columns) pub_sub_records.reindex(columns) return pub_sub_records @staticmethod def _validate( node_path: NodePathStructValue, context: UseLatestMessage, ) -> None: def is_valid() -> bool: if context.publisher_topic_name != node_path.publish_topic_name: return False if context.subscription_topic_name != node_path.subscribe_topic_name: return False return True if is_valid(): return None msg = f'UseLatest cannot build records. \n{node_path} \n{context}' raise UnsupportedNodeRecordsError(msg) class NodeRecordsTilde: def __init__( self, provider: RecordsProviderLttng, node_path: NodePathStructValue, ) -> None: if node_path.message_context is None: raise UnsupportedNodeRecordsError('node_path.message context is None') if not isinstance(node_path.message_context, Tilde): raise UnsupportedNodeRecordsError('node_path.message context is not UseLatestMessage') self._provider = provider self._context: MessageContext = node_path.message_context self._validate(node_path, self._context) self._node_path = node_path def to_records(self): tilde_records = self._provider.tilde_records( self._node_path.subscription, self._node_path.publisher) sub_records = self._provider.subscribe_records(self._node_path.subscription) pub_records = self._provider.publish_records(self._node_path.publisher) left_stamp_key = Util.find_one( lambda x: COLUMN_NAME.CALLBACK_START_TIMESTAMP in x, sub_records.columns) right_stamp_key = Util.find_one( lambda x: COLUMN_NAME.TILDE_SUBSCRIBE_TIMESTAMP in x, sub_records.columns) records = merge_sequencial( left_records=sub_records, right_records=tilde_records, left_stamp_key=left_stamp_key, right_stamp_key=right_stamp_key, join_left_key=None, join_right_key=None, columns=Columns(sub_records.columns + tilde_records.columns).as_list(), how='left', progress_label='binding tilde subscribe records.' 
) left_stamp_key = Util.find_one( lambda x: COLUMN_NAME.TILDE_PUBLISH_TIMESTAMP in x, records.columns) right_stamp_key = Util.find_one( lambda x: COLUMN_NAME.RCLCPP_PUBLISH_TIMESTAMP in x, pub_records.columns) records = merge_sequencial( left_records=records, right_records=pub_records, left_stamp_key=left_stamp_key, right_stamp_key=right_stamp_key, join_left_key=None, join_right_key=None, columns=Columns(records.columns + pub_records.columns).as_list(), how='left', progress_label='binding tilde publish records.' ) columns = [ Util.find_one(lambda x: COLUMN_NAME.CALLBACK_START_TIMESTAMP in x, records.columns), Util.find_one(lambda x: COLUMN_NAME.RCLCPP_PUBLISH_TIMESTAMP in x, records.columns), ] drop_columns = list(set(records.columns) - set(columns)) records.drop_columns(drop_columns) records.reindex(columns) return records @staticmethod def _validate( node_path: NodePathStructValue, context: MessageContext, ) -> None: def is_valid() -> bool: if not isinstance(context, Tilde): return False if context.publisher_topic_name != node_path.publish_topic_name: return False if context.subscription_topic_name != node_path.subscribe_topic_name: return False return True if is_valid(): return None msg = f'UseLatest cannot build records. \n{node_path} \n{context}' raise UnsupportedNodeRecordsError(msg) class FilteredRecordsSource: def __init__(self, lttng: Lttng): self._lttng = lttng def tilde_subscribe_records( self, tilde_subscription: int ) -> RecordsInterface: """ Compose filtered tilde subscribe records. Parameters ---------- tilde_subscription : int Returns ------- RecordsInterface Equivalent to the following process. records = lttng.compose_tilde_subscribe_records() records.filter_if( lambda x: x.get('tilde_subscription') == tilde_subscription ) records.drop_columns(['tilde_subscription]) """ sub_records = RecordsFactory.create_instance( None, [ COLUMN_NAME.TILDE_SUBSCRIBE_TIMESTAMP, COLUMN_NAME.TILDE_SUBSCRIPTION, COLUMN_NAME.TILDE_MESSAGE_ID ] ) if tilde_subscription is not None and \ tilde_subscription in self._grouped_tilde_sub_records: sub_records_ = self._grouped_tilde_sub_records[tilde_subscription].clone() sub_records.concat(sub_records_) sub_records.drop_columns([COLUMN_NAME.TILDE_SUBSCRIPTION]) return sub_records def sub_records( self, inter_callback_object: int, intra_callback_object: Optional[int] ) -> RecordsInterface: """ Compose filtered subscribe records. Parameters ---------- inter_callback_object : int intra_callback_object : Optional[int] Returns ------- RecordsInterface Equivalent to the following process. records = lttng.compose_subscribe_records() records.filter_if( lambda x: x.get('callback_object') in [ inter_callback_object, intra_callback_object ] ) """ sub_records = RecordsFactory.create_instance( None, [ COLUMN_NAME.CALLBACK_START_TIMESTAMP, COLUMN_NAME.MESSAGE_TIMESTAMP, COLUMN_NAME.SOURCE_TIMESTAMP, ] ) records = self._grouped_sub_records if inter_callback_object in records: sub_records.concat(records[inter_callback_object].clone()) if intra_callback_object is not None and intra_callback_object in records: intra_sub_records = records[intra_callback_object].clone() sub_records.concat(intra_sub_records) sub_records.sort(COLUMN_NAME.CALLBACK_START_TIMESTAMP) return sub_records def inter_comm_records( self, publisher_handles: List[int], callback_object: int ) -> RecordsInterface: """ Compose filtered inter communication records. Parameters ---------- publisher_handles : List[int] callback_object : int Returns ------- RecordsInterface Equivalent to the following process. 
records = lttng.compose_inter_proc_comm_records() records.filter_if( lambda x: x.get('callback_object') == callback_object and x.get('publisher_handle') in publisher_handles ) """ records = RecordsFactory.create_instance( None, [ COLUMN_NAME.CALLBACK_OBJECT, COLUMN_NAME.CALLBACK_START_TIMESTAMP, COLUMN_NAME.PUBLISHER_HANDLE, COLUMN_NAME.RCLCPP_INTER_PUBLISH_TIMESTAMP, COLUMN_NAME.RCL_PUBLISH_TIMESTAMP, COLUMN_NAME.DDS_WRITE_TIMESTAMP ] ) for publisher_handle in publisher_handles: key = (callback_object, publisher_handle) if key in self._grouped_inter_comm_records: comm_records = self._grouped_inter_comm_records[key].clone() records.concat(comm_records) records.sort(COLUMN_NAME.RCLCPP_INTER_PUBLISH_TIMESTAMP) return records def intra_comm_records( self, publisher_handles: List[int], intra_callback_object: Optional[int] ) -> RecordsInterface: """ Compose filtered intra communication records. Parameters ---------- publisher_handles : List[int] [description] intra_callback_object : Optional[int] [description] Returns ------- RecordsInterface Equivalent to the following process. records = lttng.compose_intra_proc_comm_records() records.filter_if( lambda x: x.get('callback_object') == callback_object and x.get('publisher_handle') in publisher_handles ) """ records = RecordsFactory.create_instance( None, [ COLUMN_NAME.CALLBACK_OBJECT, COLUMN_NAME.CALLBACK_START_TIMESTAMP, COLUMN_NAME.PUBLISHER_HANDLE, COLUMN_NAME.RCLCPP_INTRA_PUBLISH_TIMESTAMP, COLUMN_NAME.MESSAGE_TIMESTAMP ] ) if intra_callback_object is not None: for publisher_handle in publisher_handles: key = (intra_callback_object, publisher_handle) if key in self._grouped_intra_comm_records: records_ = self._grouped_intra_comm_records[key].clone() records.concat(records_) records.sort(COLUMN_NAME.RCLCPP_INTRA_PUBLISH_TIMESTAMP) return records def publish_records( self, publisher_handles: List[int], ) -> RecordsInterface: """ Compose publish records. Parameters ---------- publisher_handles : List[int] Returns ------- RecordsInterface Equivalent to the following process. records = lttng.compose_publish_records() records.filter_if( lambda x: x.get('publisher_handle') in publisher_handles ] ) """ records = self._grouped_publish_records pub_records = RecordsFactory.create_instance( None, [ COLUMN_NAME.RCLCPP_PUBLISH_TIMESTAMP, COLUMN_NAME.RCLCPP_INTRA_PUBLISH_TIMESTAMP, COLUMN_NAME.RCLCPP_INTER_PUBLISH_TIMESTAMP, COLUMN_NAME.RCL_PUBLISH_TIMESTAMP, COLUMN_NAME.DDS_WRITE_TIMESTAMP, COLUMN_NAME.MESSAGE_TIMESTAMP, COLUMN_NAME.SOURCE_TIMESTAMP, COLUMN_NAME.TILDE_PUBLISH_TIMESTAMP, COLUMN_NAME.TILDE_MESSAGE_ID, ] ) for publisher_handle in publisher_handles: if publisher_handle in records: inter_pub_records = records[publisher_handle].clone() pub_records.concat(inter_pub_records) return pub_records def tilde_publish_records( self, tilde_publishers: Sequence[int] ) -> RecordsInterface: """ Compose tilde publish records. Parameters ---------- tilde_publishers : Sequence[int] Returns ------- RecordsInterface Equivalent to the following process. 
records = lttng.compose_tilde_publish_records() records.filter_if( lambda x: x.get('tilde_publisher') in tilde_publishers ) """ tilde_grouped_records = self._grouped_tilde_pub_records tilde_records = RecordsFactory.create_instance( None, [ COLUMN_NAME.TILDE_PUBLISH_TIMESTAMP, COLUMN_NAME.TILDE_PUBLISHER, COLUMN_NAME.TILDE_MESSAGE_ID, COLUMN_NAME.TILDE_SUBSCRIPTION, ]) for tilde_publisher in tilde_publishers: if tilde_publisher in tilde_grouped_records: tilde_records_ = tilde_grouped_records[tilde_publisher].clone() tilde_records.concat(tilde_records_) tilde_records.drop_columns([COLUMN_NAME.TILDE_PUBLISHER]) return tilde_records def _expand_key_tuple( self, group: Dict[Tuple[int, ...], RecordsInterface] ) -> Dict[int, RecordsInterface]: group_: Dict[int, RecordsInterface] = {} for key in group.keys(): assert len(key) == 1 group_[key[0]] = group[key] return group_ def callback_records( self, inter_callback_object: int, intra_callback_object: Optional[int] ) -> RecordsInterface: """ Compose callback records. Parameters ---------- inter_callback_object : int intra_callback_object : Optional[int] Returns ------- RecordsInterface Equivalent to the following process. records = lttng.compose_callback_records() records.filter_if( lambda x: x.['callback_object] in [inter_callback_object, intra_callback_object] ) """ records = self._grouped_callback_records callback_records = RecordsFactory.create_instance( None, [COLUMN_NAME.CALLBACK_START_TIMESTAMP, COLUMN_NAME.CALLBACK_END_TIMESTAMP] ) if inter_callback_object in records: inter_callback_records = records[inter_callback_object].clone() callback_records.concat(inter_callback_records) if intra_callback_object is not None and intra_callback_object in records: intra_callback_records = records[intra_callback_object].clone() callback_records.concat(intra_callback_records) callback_records.sort(COLUMN_NAME.CALLBACK_START_TIMESTAMP) return callback_records @cached_property def _grouped_callback_records(self) -> Dict[int, RecordsInterface]: records = self._lttng.compose_callback_records() group = records.groupby([COLUMN_NAME.CALLBACK_OBJECT]) return self._expand_key_tuple(group) @cached_property def _grouped_inter_comm_records(self) -> Dict[Tuple[int, ...], RecordsInterface]: records = self._lttng.compose_inter_proc_comm_records() return records.groupby([COLUMN_NAME.CALLBACK_OBJECT, COLUMN_NAME.PUBLISHER_HANDLE]) @cached_property def _grouped_intra_comm_records(self) -> Dict[Tuple[int, ...], RecordsInterface]: records = self._lttng.compose_intra_proc_comm_records() return records.groupby([COLUMN_NAME.CALLBACK_OBJECT, COLUMN_NAME.PUBLISHER_HANDLE]) @cached_property def _grouped_publish_records(self) -> Dict[int, RecordsInterface]: records = self._lttng.compose_publish_records() group = records.groupby([COLUMN_NAME.PUBLISHER_HANDLE]) return self._expand_key_tuple(group) @cached_property def _grouped_sub_records(self) -> Dict[int, RecordsInterface]: records = self._lttng.compose_subscribe_records() group = records.groupby([COLUMN_NAME.CALLBACK_OBJECT]) return self._expand_key_tuple(group) @cached_property def _grouped_tilde_pub_records(self) -> Dict[int, RecordsInterface]: records = self._lttng.compose_tilde_publish_records() group = records.groupby([COLUMN_NAME.TILDE_PUBLISHER]) return self._expand_key_tuple(group) @cached_property def _grouped_tilde_sub_records(self) -> Dict[int, RecordsInterface]: records = self._lttng.compose_tilde_subscribe_records() group = records.groupby([COLUMN_NAME.TILDE_SUBSCRIPTION]) return self._expand_key_tuple(group)
[ [ [ 631, 646 ], [ 50583, 50598 ], [ 50842, 50857 ], [ 51106, 51121 ], [ 51370, 51385 ], [ 51628, 51643 ], [ 51883, 51898 ], [ 52148, 52163 ] ], [ [ 667, 676 ], [ 1860, 1869 ] ], [ [ 696, 700 ], [ 49008, 49012 ], [ 48959, 48963 ], [ 49053, 49057 ], [ 50642, 50646 ], [ 50903, 50907 ], [ 51167, 51171 ], [ 51428, 51432 ], [ 51682, 51686 ], [ 51943, 51947 ], [ 52208, 52212 ] ], [ [ 702, 706 ], [ 22128, 22132 ], [ 27091, 27095 ], [ 27373, 27377 ], [ 27755, 27759 ], [ 43301, 43305 ], [ 44767, 44771 ], [ 46325, 46329 ] ], [ [ 708, 716 ], [ 18861, 18869 ], [ 22366, 22374 ], [ 22401, 22409 ], [ 25204, 25212 ], [ 26175, 26183 ], [ 26611, 26619 ], [ 26858, 26866 ], [ 41935, 41943 ], [ 44809, 44817 ], [ 49322, 49330 ] ], [ [ 718, 726 ], [ 47739, 47747 ] ], [ [ 728, 733 ], [ 25193, 25198 ], [ 26164, 26169 ], [ 48964, 48969 ], [ 50908, 50913 ], [ 51172, 51177 ] ], [ [ 735, 740 ], [ 16263, 16268 ] ], [ [ 798, 812 ], [ 37661, 37675 ], [ 39976, 39990 ] ], [ [ 814, 832 ], [ 3822, 3840 ], [ 3985, 4003 ], [ 4163, 4181 ], [ 4333, 4351 ] ], [ [ 853, 858 ], [ 2142, 2147 ], [ 24998, 25003 ], [ 40590, 40595 ] ], [ [ 886, 905 ], [ 27760, 27779 ] ], [ [ 907, 937 ], [ 27942, 27972 ] ], [ [ 960, 967 ], [ 9679, 9686 ], [ 14093, 14100 ], [ 15682, 15689 ], [ 29120, 29127 ], [ 29827, 29834 ], [ 31060, 31067 ], [ 33976, 33983 ], [ 36227, 36234 ], [ 38648, 38655 ], [ 39339, 39346 ] ], [ [ 969, 973 ], [ 30223, 30227 ], [ 30428, 30432 ], [ 38130, 38134 ], [ 38257, 38261 ], [ 38834, 38838 ], [ 38956, 38960 ], [ 39524, 39528 ], [ 39621, 39625 ] ], [ [ 1001, 1021 ], [ 7105, 7125 ], [ 8672, 8692 ], [ 16478, 16498 ], [ 16857, 16877 ] ], [ [ 1050, 1077 ], [ 4444, 4471 ], [ 31521, 31548 ], [ 31612, 31639 ], [ 31823, 31850 ], [ 32242, 32269 ], [ 32439, 32466 ], [ 32705, 32732 ], [ 32934, 32961 ], [ 34895, 34922 ], [ 35156, 35183 ], [ 35311, 35338 ], [ 37128, 37155 ], [ 37378, 37405 ], [ 37522, 37549 ], [ 40495, 40522 ] ], [ [ 1106, 1126 ], [ 25801, 25821 ] ], [ [ 1159, 1178 ], [ 1909, 1928 ] ], [ [ 1219, 1230 ], [ 5267, 5278 ], [ 5317, 5328 ], [ 7543, 7554 ], [ 7593, 7604 ], [ 7636, 7647 ], [ 9445, 9456 ], [ 9515, 9526 ], [ 9847, 9858 ], [ 9897, 9908 ], [ 9940, 9951 ], [ 9982, 9993 ], [ 10033, 10044 ], [ 11161, 11172 ], [ 11211, 11222 ], [ 11267, 11278 ], [ 11323, 11334 ], [ 11370, 11381 ], [ 11415, 11426 ], [ 11458, 11469 ], [ 14278, 14289 ], [ 14328, 14339 ], [ 14384, 14395 ], [ 14440, 14451 ], [ 14487, 14498 ], [ 14532, 14543 ], [ 14575, 14586 ], [ 14617, 14628 ], [ 14666, 14677 ], [ 15575, 15586 ], [ 15632, 15643 ], [ 15875, 15886 ], [ 15926, 15937 ], [ 20040, 20051 ], [ 20096, 20107 ], [ 20229, 20240 ], [ 20273, 20284 ], [ 21565, 21576 ], [ 21621, 21632 ], [ 21668, 21679 ], [ 21713, 21724 ], [ 21845, 21856 ], [ 21889, 21900 ], [ 22467, 22478 ], [ 22622, 22633 ], [ 22548, 22559 ], [ 22673, 22684 ], [ 22840, 22851 ], [ 22760, 22771 ], [ 22897, 22908 ], [ 23055, 23066 ], [ 22978, 22989 ], [ 23106, 23117 ], [ 23260, 23271 ], [ 23185, 23196 ], [ 23309, 23320 ], [ 23476, 23487 ], [ 23396, 23407 ], [ 23533, 23544 ], [ 23682, 23693 ], [ 23611, 23622 ], [ 23730, 23741 ], [ 23875, 23886 ], [ 23806, 23817 ], [ 23921, 23932 ], [ 24062, 24073 ], [ 23995, 24006 ], [ 24106, 24117 ], [ 24245, 24256 ], [ 24179, 24190 ], [ 24288, 24299 ], [ 24445, 24456 ], [ 24370, 24381 ], [ 24497, 24508 ], [ 24636, 24647 ], [ 24570, 24581 ], [ 24679, 24690 ], [ 24832, 24843 ], [ 24759, 24770 ], [ 33519, 33530 ], [ 33616, 33627 ], [ 41328, 41339 ], [ 41383, 41394 ], [ 41431, 41442 ], [ 41772, 41783 ], [ 42615, 42626 ], [ 42669, 42680 ], [ 42716, 42727 ], [ 
43165, 43176 ], [ 44013, 44024 ], [ 44058, 44069 ], [ 44112, 44123 ], [ 44158, 44169 ], [ 44218, 44229 ], [ 44269, 44280 ], [ 44630, 44641 ], [ 45563, 45574 ], [ 45608, 45619 ], [ 45662, 45673 ], [ 45708, 45719 ], [ 45768, 45779 ], [ 46190, 46201 ], [ 46945, 46956 ], [ 46999, 47010 ], [ 47059, 47070 ], [ 47119, 47130 ], [ 47170, 47181 ], [ 47219, 47230 ], [ 47266, 47277 ], [ 47312, 47323 ], [ 47365, 47376 ], [ 48375, 48386 ], [ 48428, 48439 ], [ 48473, 48484 ], [ 48519, 48530 ], [ 48843, 48854 ], [ 49986, 49997 ], [ 50024, 50035 ], [ 50506, 50517 ], [ 50761, 50772 ], [ 51040, 51051 ], [ 51069, 51080 ], [ 51304, 51315 ], [ 51333, 51344 ], [ 51546, 51557 ], [ 51802, 51813 ], [ 52067, 52078 ], [ 52334, 52345 ], [ 30268, 30279 ], [ 30473, 30484 ], [ 38167, 38178 ], [ 38294, 38305 ], [ 38871, 38882 ], [ 38993, 39004 ], [ 39548, 39559 ], [ 39645, 39656 ] ], [ [ 1254, 1259 ], [ 15465, 15470 ], [ 28910, 28915 ], [ 29617, 29622 ] ], [ [ 1261, 1277 ], [ 9309, 9325 ], [ 13801, 13817 ], [ 18240, 18256 ], [ 30755, 30771 ], [ 33674, 33690 ], [ 35944, 35960 ], [ 38378, 38394 ], [ 39075, 39091 ] ], [ [ 1279, 1293 ], [ 3739, 3753 ], [ 41248, 41262 ], [ 42535, 42549 ], [ 43933, 43947 ], [ 45483, 45497 ], [ 46865, 46879 ], [ 48295, 48309 ], [ 49923, 49937 ] ], [ [ 1295, 1311 ], [ 2398, 2414 ], [ 3470, 3486 ], [ 4699, 4715 ], [ 5616, 5632 ], [ 6523, 6539 ], [ 7993, 8009 ], [ 10371, 10387 ], [ 11728, 11744 ], [ 12776, 12792 ], [ 14981, 14997 ], [ 17311, 17327 ], [ 17742, 17758 ], [ 17856, 17872 ], [ 19107, 19123 ], [ 20550, 20566 ], [ 22101, 22117 ], [ 22325, 22341 ], [ 40715, 40731 ], [ 41958, 41974 ], [ 43350, 43366 ], [ 44832, 44848 ], [ 46345, 46361 ], [ 47762, 47778 ], [ 49018, 49034 ], [ 48981, 48997 ], [ 49063, 49079 ], [ 49345, 49361 ], [ 50652, 50668 ], [ 50925, 50941 ], [ 51189, 51205 ], [ 51438, 51454 ], [ 51692, 51708 ], [ 51953, 51969 ], [ 52218, 52234 ] ], [ [ 1343, 1356 ], [ 31722, 31735 ] ], [ [ 1358, 1377 ], [ 4670, 4689 ], [ 25164, 25183 ], [ 28400, 28419 ], [ 28741, 28760 ], [ 30110, 30129 ] ], [ [ 1409, 1433 ], [ 2364, 2388 ], [ 18827, 18851 ], [ 19072, 19096 ], [ 20516, 20540 ] ], [ [ 1435, 1453 ], [ 32823, 32841 ], [ 33025, 33043 ], [ 34439, 34457 ] ], [ [ 1485, 1504 ], [ 3440, 3459 ], [ 28163, 28182 ], [ 31427, 31446 ], [ 32605, 32624 ], [ 34401, 34420 ], [ 35056, 35075 ], [ 36645, 36664 ], [ 37278, 37297 ], [ 39938, 39957 ] ], [ [ 1506, 1526 ], [ 10341, 10361 ], [ 11698, 11718 ], [ 12746, 12766 ], [ 14951, 14971 ], [ 16269, 16289 ], [ 27061, 27081 ], [ 27343, 27363 ], [ 27725, 27745 ] ], [ [ 1528, 1531 ], [ 16325, 16328 ] ], [ [ 1563, 1594 ], [ 19692, 19723 ], [ 21242, 21273 ], [ 25378, 25409 ], [ 26123, 26154 ], [ 26323, 26354 ], [ 26570, 26601 ], [ 26817, 26848 ], [ 27901, 27932 ] ], [ [ 1626, 1649 ], [ 5583, 5606 ], [ 6490, 6513 ], [ 7960, 7983 ], [ 14907, 14930 ], [ 16291, 16314 ], [ 16361, 16384 ] ], [ [ 1681, 1686 ], [ 37496, 37501 ], [ 40078, 40083 ] ], [ [ 1718, 1742 ], [ 25252, 25276 ], [ 25895, 25919 ] ], [ [ 1774, 1790 ], [ 35274, 35290 ], [ 35450, 35466 ], [ 36683, 36699 ] ], [ [ 1822, 1848 ], [ 17275, 17301 ], [ 29360, 29386 ] ], [ [ 1851, 1857 ], [ 3703, 3709 ], [ 16971, 16977 ] ], [ [ 1888, 1908 ], [ 28122, 28142 ], [ 32564, 32584 ], [ 35015, 35035 ], [ 37237, 37257 ] ], [ [ 24923, 24949 ], [ 2266, 2292 ] ], [ [ 28046, 28070 ], [ 3876, 3900 ] ], [ [ 32479, 32512 ], [ 4045, 4078 ] ], [ [ 34936, 34963 ], [ 4221, 4248 ] ], [ [ 37169, 37185 ], [ 4378, 4394 ] ], [ [ 40536, 40557 ], [ 2214, 2235 ] ] ]
#!/usr/bin/env python3 # -*- coding:utf-8 -*- import yaml class HexahueMap(): def __init__(self, space_color): pink = (255, 0, 255) red = (255, 0, 0) green = (0, 255, 0) yellow = (255, 255, 0) blue = (0, 0, 255) sky = (0, 255, 255) white = (255, 255, 255) gray = (128, 128, 128) black = (0, 0, 0) self.hmap = {} self.hmap[(pink, red, green, yellow, blue, sky)] = 'A' self.hmap[(red, pink, green, yellow, blue, sky)] = 'B' self.hmap[(red, green, pink, yellow, blue, sky)] = 'C' self.hmap[(red, green, yellow, pink, blue, sky)] = 'D' self.hmap[(red, green, yellow, blue, pink, sky)] = 'E' self.hmap[(red, green, yellow, blue, sky, pink)] = 'F' self.hmap[(green, red, yellow, blue, sky, pink)] = 'G' self.hmap[(green, yellow, red, blue, sky, pink)] = 'H' self.hmap[(green, yellow, blue, red, sky, pink)] = 'I' self.hmap[(green, yellow, blue, sky, red, pink)] = 'J' self.hmap[(green, yellow, blue, sky, pink, red)] = 'K' self.hmap[(yellow, green, blue, sky, pink, red)] = 'L' self.hmap[(yellow, blue, green, sky, pink, red)] = 'M' self.hmap[(yellow, blue, sky, green, pink, red)] = 'N' self.hmap[(yellow, blue, sky, pink, green, red)] = 'O' self.hmap[(yellow, blue, sky, pink, red, green)] = 'P' self.hmap[(blue, yellow, sky, pink, red, green)] = 'Q' self.hmap[(blue, sky, yellow, pink, red, green)] = 'R' self.hmap[(blue, sky, pink, yellow, red, green)] = 'S' self.hmap[(blue, sky, pink, red, yellow, green)] = 'T' self.hmap[(blue, sky, pink, red, green, yellow)] = 'U' self.hmap[(sky, blue, pink, red, green, yellow)] = 'V' self.hmap[(sky, pink, blue, red, green, yellow)] = 'W' self.hmap[(sky, pink, red, blue, green, yellow)] = 'X' self.hmap[(sky, pink, red, green, blue, yellow)] = 'Y' self.hmap[(sky, pink, red, green, yellow, blue)] = 'Z' self.hmap[(black, white, white, black, black, white)] = '.' self.hmap[(white, black, black, white, white, black)] = ',' if space_color == 'black': self.hmap[(black, black, black, black, black, black)] = ' ' elif space_color == 'white': self.hmap[(white, white, white, white, white, white)] = ' ' elif space_color == 'all': self.hmap[(black, black, black, black, black, black)] = ' ' self.hmap[(white, white, white, white, white, white)] = ' ' else: raise Exception('[Error] invalid space setting: ' + space_color) self.hmap[(black, gray, white, black, gray, white)] = '0' self.hmap[(gray, black, white, black, gray, white)] = '1' self.hmap[(gray, white, black, black, gray, white)] = '2' self.hmap[(gray, white, black, gray, black, white)] = '3' self.hmap[(gray, white, black, gray, white, black)] = '4' self.hmap[(white, gray, black, gray, white, black)] = '5' self.hmap[(white, black, gray, gray, white, black)] = '6' self.hmap[(white, black, gray, white, gray, black)] = '7' self.hmap[(white, black, gray, white, black, gray)] = '8' self.hmap[(black, white, gray, white, black, gray)] = '9'
[ [ [ 54, 58 ] ], [ [ 66, 76 ] ] ]
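A minimal usage sketch for the HexahueMap class above (not part of the original file): it decodes one Hexahue glyph, i.e. a 2x3 grid of RGB tuples, here assumed to be read left-to-right, top-to-bottom. 'black' is one of the space_color values the constructor accepts.

# Hypothetical usage of the class above; the glyph tuple mirrors the (pink, red, green, yellow, blue, sky) key.
decoder = HexahueMap('black')
glyph = ((255, 0, 255), (255, 0, 0), (0, 255, 0),      # pink, red, green
         (255, 255, 0), (0, 0, 255), (0, 255, 255))  # yellow, blue, sky
print(decoder.hmap[glyph])  # -> 'A'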
# Task 7
for n in range(1, 101):
    print(n, "I will not eat Bobo sticks during the lesson")
[ [ [ 17, 18 ], [ 47, 48 ] ] ]
""" SoftLayer.ordering ~~~~~~~~~~~~~~~~~~ Ordering Manager :license: MIT, see LICENSE for more details. """ class OrderingManager(object): """Manages hardware devices. :param SoftLayer.API.Client client: an API client instance """ def __init__(self, client): self.client = client def get_packages_of_type(self, package_types, mask=None): """Get packages that match a certain type. Each ordering package has a type, so return all packages that match the types we are looking for :param list package_types: List of strings representing the package type keynames we are interested in. :param string mask: Mask to specify the properties we want to retrieve """ package_service = self.client['Product_Package'] _filter = { 'type': { 'keyName': { 'operation': 'in', 'options': [ {'name': 'data', 'value': package_types} ], }, }, } packages = package_service.getAllObjects(mask=mask, filter=_filter) packages = self.filter_outlet_packages(packages) return packages @staticmethod def filter_outlet_packages(packages): """Remove packages designated as OUTLET. Those type of packages must be handled in a different way, and they are not supported at the moment. :param packages: Dictionary of packages. Name and description keys must be present in each of them. """ non_outlet_packages = [] for package in packages: if all(['OUTLET' not in package.get('description', '').upper(), 'OUTLET' not in package.get('name', '').upper()]): non_outlet_packages.append(package) return non_outlet_packages @staticmethod def get_only_active_packages(packages): """Return only active packages. If a package is active, it is eligible for ordering This will inspect the 'isActive' property on the provided packages :param packages Dictionary of packages, isActive key must be present """ active_packages = [] for package in packages: if package['isActive']: active_packages.append(package) return active_packages def get_package_by_type(self, package_type, mask=None): """Get a single package of a given type. Syntactic sugar to retrieve a single package of a given type. If multiple packages share the given type, this will return the first one returned by the API. If no packages are found, returns None :param package_type string representing the package type key name we are interested in """ packages = self.get_packages_of_type([package_type], mask) if len(packages) == 0: return None else: return packages.pop() def get_package_id_by_type(self, package_type): """Return the package ID of a Product Package with a given type. :param package_type string representing the package type key name we are interested in :raises ValueError when no package of the given type is found """ mask = "mask[id, name, description, isActive, type[keyName]]" package = self.get_package_by_type(package_type, mask) if package: return package['id'] else: raise ValueError("No package found for type: " + package_type) def get_quotes(self): """Retrieve a list of quotes. :return a list of SoftLayer_Billing_Order_Quote """ quotes = self.client['Account'].getActiveQuotes() return quotes def get_quote_details(self, quote_id): """Retrieve quote details. :param quote_id ID number of target quote """ quote = self.client['Billing_Order_Quote'].getObject(id=quote_id) return quote def get_order_container(self, quote_id): """Generate an order container from a quote object. 
        :param quote_id: ID number of target quote
        """
        quote = self.client['Billing_Order_Quote']
        container = quote.getRecalculatedOrderContainer(id=quote_id)
        return container['orderContainers'][0]

    def generate_order_template(self, quote_id, extra, quantity=1):
        """Generate a complete order template.

        :param int quote_id: ID of target quote
        :param list extra: List of dictionaries that have extra details about
                           the order such as hostname or domain names for
                           virtual servers or hardware nodes
        :param int quantity: Number of ~things~ to order
        """
        container = self.get_order_container(quote_id)
        container['quantity'] = quantity

        # NOTE(kmcdonald): This will only work with virtualGuests and hardware.
        #                  There has to be a better way, since this is based on
        #                  an existing quote that supposedly knows about this
        #                  detail
        if container['packageId'] == 46:
            product_type = 'virtualGuests'
        else:
            product_type = 'hardware'

        if len(extra) != quantity:
            raise ValueError("You must specify extra for each server in the "
                             "quote")

        container[product_type] = []
        for extra_details in extra:
            container[product_type].append(extra_details)

        container['presetId'] = None
        return container

    def verify_quote(self, quote_id, extra, quantity=1):
        """Verifies that a quote order is valid.

        :param int quote_id: ID for the target quote
        :param list extra: list of dictionaries with the extra details
                           (hostname, domain, etc.) for each server in
                           the quote
        :param int quantity: Quantity to override default
        """
        container = self.generate_order_template(quote_id, extra,
                                                 quantity=quantity)
        return self.client['Product_Order'].verifyOrder(container)

    def order_quote(self, quote_id, extra, quantity=1):
        """Places an order using a quote.

        :param int quote_id: ID for the target quote
        :param list extra: list of dictionaries with the extra details
                           (hostname, domain, etc.) for each server in
                           the quote
        :param int quantity: Quantity to override default
        """
        container = self.generate_order_template(quote_id, extra,
                                                 quantity=quantity)
        return self.client['Product_Order'].placeOrder(container)
[ [ [ 133, 148 ] ] ]
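A hedged usage sketch for OrderingManager above, not taken from this repository: the quote id and the extra dictionaries are placeholders, and any authenticated SoftLayer API client object can stand in for create_client_from_env().

# Hypothetical quote-to-order flow using the manager defined above.
import SoftLayer

client = SoftLayer.create_client_from_env()  # assumes credentials are already configured
ordering = OrderingManager(client)

quote_id = 1234567                                             # placeholder quote id
extras = [{'hostname': 'server01', 'domain': 'example.com'}]   # one dict per server in the quote

# Verify the order built from the quote first, then place it.
ordering.verify_quote(quote_id, extras, quantity=1)
order = ordering.order_quote(quote_id, extras, quantity=1)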
# BOJ 14501: consultation scheduling DP
import sys

si = sys.stdin.readline

t = [0] * 17   # t[i]: number of days the consultation on day i takes
dp = [0] * 17  # dp[i]: pay for day i, later the best profit from day i onward

n = int(si())
for i in range(1, n + 1):
    m, o = map(int, si().split())
    t[i] = m
    dp[i] = o


def solve(n):
    ans = 0
    for i in range(n, 0, -1):
        if i + t[i] > n + 1:
            # The job started on day i would not finish in time, so skip it.
            dp[i] = dp[i + 1]
        else:
            # Either skip day i, or take its pay plus the best profit after the job ends.
            dp[i] = max(dp[i + 1], dp[i] + dp[i + t[i]])
        ans = max(ans, dp[i])
    return ans


print(solve(n))
[ [ [ 19, 22 ], [ 29, 32 ] ], [ [ 24, 26 ], [ 86, 88 ], [ 138, 140 ] ], [ [ 50, 51 ], [ 156, 157 ], [ 252, 253 ], [ 360, 361 ] ], [ [ 63, 65 ], [ 169, 171 ], [ 286, 288 ], [ 278, 280 ], [ 334, 336 ], [ 345, 347 ], [ 353, 355 ], [ 322, 324 ], [ 394, 396 ] ], [ [ 78, 79 ], [ 110, 111 ], [ 430, 431 ] ], [ [ 96, 97 ], [ 158, 159 ], [ 172, 173 ] ], [ [ 122, 123 ], [ 163, 164 ] ], [ [ 125, 126 ], [ 177, 178 ] ], [ [ 185, 190 ], [ 424, 429 ] ] ]
import torch.utils.data as data from PIL import Image import torchvision.transforms as transforms from torchvision.transforms import InterpolationMode class BaseDataset(data.Dataset): def __init__(self): super(BaseDataset, self).__init__() def name(self): return 'BaseDataset' def initialize(self, opt): pass def get_transform(opt): transform_list = [] if opt.resize_or_crop == 'resize_and_crop': osize = [opt.loadSize, opt.loadSize] transform_list.append(transforms.Resize(osize, InterpolationMode.BICUBIC)) transform_list.append(transforms.RandomCrop(opt.fineSize)) elif opt.resize_or_crop == 'crop': transform_list.append(transforms.RandomCrop(opt.fineSize)) elif opt.resize_or_crop == 'scale_width': transform_list.append(transforms.Lambda( lambda img: __scale_width(img, opt.fineSize))) elif opt.resize_or_crop == 'scale_width_and_crop': transform_list.append(transforms.Lambda( lambda img: __scale_width(img, opt.loadSize))) transform_list.append(transforms.RandomCrop(opt.fineSize)) if opt.isTrain and not opt.no_flip: transform_list.append(transforms.RandomHorizontalFlip()) transform_list += [transforms.ToTensor(), transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))] return transforms.Compose(transform_list) def __scale_width(img, target_width): ow, oh = img.size if (ow == target_width): return img w = target_width h = int(target_width * oh / ow) return img.resize((w, h), Image.BICUBIC)
[ [ [ 7, 31 ], [ 170, 174 ] ], [ [ 48, 53 ], [ 1648, 1653 ] ], [ [ 61, 97 ], [ 520, 530 ], [ 603, 613 ], [ 709, 719 ], [ 822, 832 ], [ 985, 995 ], [ 1093, 1103 ], [ 1201, 1211 ], [ 1260, 1270 ], [ 1306, 1316 ], [ 1417, 1427 ] ], [ [ 133, 150 ], [ 545, 562 ] ], [ [ 158, 169 ], [ 223, 234 ] ], [ [ 353, 366 ] ], [ [ 1457, 1470 ], [ 865, 878 ], [ 1028, 1041 ] ] ]
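A small usage sketch for get_transform above (illustrative only): the opt object just needs the attributes the function reads, and the concrete values shown here are assumptions rather than anything taken from this repository.

# Hypothetical call of get_transform with a minimal options object.
from types import SimpleNamespace

from PIL import Image

opt = SimpleNamespace(resize_or_crop='resize_and_crop', loadSize=286,
                fineSize=256, isTrain=True, no_flip=False)
transform = get_transform(opt)

img = Image.new('RGB', (300, 300))  # stand-in for a dataset image
tensor = transform(img)         # 3 x 256 x 256 tensor, normalized to [-1, 1]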
""" Created Oct 19, 2017 @author: Spencer Vatrt-Watts (github.com/Spenca) """ from __future__ import unicode_literals from django.apps import AppConfig class TenxConfig(AppConfig): name = 'tenx'
[ [ [ 103, 119 ] ], [ [ 145, 154 ], [ 174, 183 ] ], [ [ 163, 173 ] ] ]
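For context, a typical way such an AppConfig is wired into a Django project's settings (a convention sketch, not taken from this repository; the dotted path assumes the class lives in tenx/apps.py):

# Hypothetical settings.py fragment registering the app via its AppConfig.
INSTALLED_APPS = [
    # ...
    'tenx.apps.TenxConfig',
]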