# -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import proto # type: ignore
from google.cloud.aiplatform_v1.types import pipeline_job as gca_pipeline_job
from google.cloud.aiplatform_v1.types import training_pipeline as gca_training_pipeline
from google.protobuf import field_mask_pb2 # type: ignore
__protobuf__ = proto.module(
package="google.cloud.aiplatform.v1",
manifest={
"CreateTrainingPipelineRequest",
"GetTrainingPipelineRequest",
"ListTrainingPipelinesRequest",
"ListTrainingPipelinesResponse",
"DeleteTrainingPipelineRequest",
"CancelTrainingPipelineRequest",
"CreatePipelineJobRequest",
"GetPipelineJobRequest",
"ListPipelineJobsRequest",
"ListPipelineJobsResponse",
"DeletePipelineJobRequest",
"CancelPipelineJobRequest",
},
)
class CreateTrainingPipelineRequest(proto.Message):
r"""Request message for
[PipelineService.CreateTrainingPipeline][google.cloud.aiplatform.v1.PipelineService.CreateTrainingPipeline].
Attributes:
parent (str):
Required. The resource name of the Location to create the
TrainingPipeline in. Format:
``projects/{project}/locations/{location}``
training_pipeline (google.cloud.aiplatform_v1.types.TrainingPipeline):
Required. The TrainingPipeline to create.
"""
parent = proto.Field(proto.STRING, number=1,)
training_pipeline = proto.Field(
proto.MESSAGE, number=2, message=gca_training_pipeline.TrainingPipeline,
)
class GetTrainingPipelineRequest(proto.Message):
r"""Request message for
[PipelineService.GetTrainingPipeline][google.cloud.aiplatform.v1.PipelineService.GetTrainingPipeline].
Attributes:
name (str):
Required. The name of the TrainingPipeline resource. Format:
``projects/{project}/locations/{location}/trainingPipelines/{training_pipeline}``
"""
name = proto.Field(proto.STRING, number=1,)
class ListTrainingPipelinesRequest(proto.Message):
r"""Request message for
[PipelineService.ListTrainingPipelines][google.cloud.aiplatform.v1.PipelineService.ListTrainingPipelines].
Attributes:
parent (str):
Required. The resource name of the Location to list the
TrainingPipelines from. Format:
``projects/{project}/locations/{location}``
filter (str):
            Lists the TrainingPipelines that match the filter expression. The
following fields are supported:
- ``pipeline_name``: Supports ``=`` and ``!=`` comparisons.
- ``create_time``: Supports ``=``, ``!=``, ``<``, ``>``,
``<=``, and ``>=`` comparisons. Values must be in RFC
3339 format.
- ``update_time``: Supports ``=``, ``!=``, ``<``, ``>``,
``<=``, and ``>=`` comparisons. Values must be in RFC
3339 format.
- ``end_time``: Supports ``=``, ``!=``, ``<``, ``>``,
``<=``, and ``>=`` comparisons. Values must be in RFC
3339 format.
- ``labels``: Supports key-value equality and key presence.
Filter expressions can be combined together using logical
operators (``AND`` & ``OR``). For example:
``pipeline_name="test" AND create_time>"2020-05-18T13:30:00Z"``.
The syntax to define filter expression is based on
https://google.aip.dev/160.
Examples:
- ``create_time>"2021-05-18T00:00:00Z" OR update_time>"2020-05-18T00:00:00Z"``
              TrainingPipelines created after 2021-05-18 00:00:00 UTC
              or updated after 2020-05-18 00:00:00 UTC.
            - ``labels.env = "prod"`` TrainingPipelines with label
              "env" set to "prod".
page_size (int):
The standard list page size.
page_token (str):
The standard list page token. Typically obtained via
[ListTrainingPipelinesResponse.next_page_token][google.cloud.aiplatform.v1.ListTrainingPipelinesResponse.next_page_token]
of the previous
[PipelineService.ListTrainingPipelines][google.cloud.aiplatform.v1.PipelineService.ListTrainingPipelines]
call.
read_mask (google.protobuf.field_mask_pb2.FieldMask):
Mask specifying which fields to read.
"""
parent = proto.Field(proto.STRING, number=1,)
filter = proto.Field(proto.STRING, number=2,)
page_size = proto.Field(proto.INT32, number=3,)
page_token = proto.Field(proto.STRING, number=4,)
read_mask = proto.Field(proto.MESSAGE, number=5, message=field_mask_pb2.FieldMask,)
class ListTrainingPipelinesResponse(proto.Message):
r"""Response message for
[PipelineService.ListTrainingPipelines][google.cloud.aiplatform.v1.PipelineService.ListTrainingPipelines]
Attributes:
training_pipelines (Sequence[google.cloud.aiplatform_v1.types.TrainingPipeline]):
List of TrainingPipelines in the requested
page.
next_page_token (str):
A token to retrieve the next page of results. Pass to
[ListTrainingPipelinesRequest.page_token][google.cloud.aiplatform.v1.ListTrainingPipelinesRequest.page_token]
to obtain that page.
"""
@property
def raw_page(self):
return self
training_pipelines = proto.RepeatedField(
proto.MESSAGE, number=1, message=gca_training_pipeline.TrainingPipeline,
)
next_page_token = proto.Field(proto.STRING, number=2,)
class DeleteTrainingPipelineRequest(proto.Message):
r"""Request message for
[PipelineService.DeleteTrainingPipeline][google.cloud.aiplatform.v1.PipelineService.DeleteTrainingPipeline].
Attributes:
name (str):
Required. The name of the TrainingPipeline resource to be
deleted. Format:
``projects/{project}/locations/{location}/trainingPipelines/{training_pipeline}``
"""
name = proto.Field(proto.STRING, number=1,)
class CancelTrainingPipelineRequest(proto.Message):
r"""Request message for
[PipelineService.CancelTrainingPipeline][google.cloud.aiplatform.v1.PipelineService.CancelTrainingPipeline].
Attributes:
name (str):
Required. The name of the TrainingPipeline to cancel.
Format:
``projects/{project}/locations/{location}/trainingPipelines/{training_pipeline}``
"""
name = proto.Field(proto.STRING, number=1,)
class CreatePipelineJobRequest(proto.Message):
r"""Request message for
[PipelineService.CreatePipelineJob][google.cloud.aiplatform.v1.PipelineService.CreatePipelineJob].
Attributes:
parent (str):
Required. The resource name of the Location to create the
PipelineJob in. Format:
``projects/{project}/locations/{location}``
pipeline_job (google.cloud.aiplatform_v1.types.PipelineJob):
Required. The PipelineJob to create.
pipeline_job_id (str):
The ID to use for the PipelineJob, which will become the
final component of the PipelineJob name. If not provided, an
ID will be automatically generated.
This value should be less than 128 characters, and valid
characters are /[a-z][0-9]-/.
"""
parent = proto.Field(proto.STRING, number=1,)
pipeline_job = proto.Field(
proto.MESSAGE, number=2, message=gca_pipeline_job.PipelineJob,
)
pipeline_job_id = proto.Field(proto.STRING, number=3,)
class GetPipelineJobRequest(proto.Message):
r"""Request message for
[PipelineService.GetPipelineJob][google.cloud.aiplatform.v1.PipelineService.GetPipelineJob].
Attributes:
name (str):
Required. The name of the PipelineJob resource. Format:
``projects/{project}/locations/{location}/pipelineJobs/{pipeline_job}``
"""
name = proto.Field(proto.STRING, number=1,)
class ListPipelineJobsRequest(proto.Message):
r"""Request message for
[PipelineService.ListPipelineJobs][google.cloud.aiplatform.v1.PipelineService.ListPipelineJobs].
Attributes:
parent (str):
Required. The resource name of the Location to list the
PipelineJobs from. Format:
``projects/{project}/locations/{location}``
filter (str):
The standard list filter. Supported fields:
- ``display_name`` supports ``=`` and ``!=``.
- ``state`` supports ``=`` and ``!=``.
The following examples demonstrate how to filter the list of
PipelineJobs:
- ``state="PIPELINE_STATE_SUCCEEDED" AND display_name="my_pipeline"``
- ``state="PIPELINE_STATE_RUNNING" OR display_name="my_pipeline"``
- ``NOT display_name="my_pipeline"``
- ``state="PIPELINE_STATE_FAILED"``
page_size (int):
The standard list page size.
page_token (str):
The standard list page token. Typically obtained via
[ListPipelineJobsResponse.next_page_token][google.cloud.aiplatform.v1.ListPipelineJobsResponse.next_page_token]
of the previous
[PipelineService.ListPipelineJobs][google.cloud.aiplatform.v1.PipelineService.ListPipelineJobs]
call.
"""
parent = proto.Field(proto.STRING, number=1,)
filter = proto.Field(proto.STRING, number=2,)
page_size = proto.Field(proto.INT32, number=3,)
page_token = proto.Field(proto.STRING, number=4,)
class ListPipelineJobsResponse(proto.Message):
r"""Response message for
[PipelineService.ListPipelineJobs][google.cloud.aiplatform.v1.PipelineService.ListPipelineJobs]
Attributes:
pipeline_jobs (Sequence[google.cloud.aiplatform_v1.types.PipelineJob]):
List of PipelineJobs in the requested page.
next_page_token (str):
A token to retrieve the next page of results. Pass to
[ListPipelineJobsRequest.page_token][google.cloud.aiplatform.v1.ListPipelineJobsRequest.page_token]
to obtain that page.
"""
@property
def raw_page(self):
return self
pipeline_jobs = proto.RepeatedField(
proto.MESSAGE, number=1, message=gca_pipeline_job.PipelineJob,
)
next_page_token = proto.Field(proto.STRING, number=2,)
class DeletePipelineJobRequest(proto.Message):
r"""Request message for
[PipelineService.DeletePipelineJob][google.cloud.aiplatform.v1.PipelineService.DeletePipelineJob].
Attributes:
name (str):
Required. The name of the PipelineJob resource to be
deleted. Format:
``projects/{project}/locations/{location}/pipelineJobs/{pipeline_job}``
"""
name = proto.Field(proto.STRING, number=1,)
class CancelPipelineJobRequest(proto.Message):
r"""Request message for
[PipelineService.CancelPipelineJob][google.cloud.aiplatform.v1.PipelineService.CancelPipelineJob].
Attributes:
name (str):
Required. The name of the PipelineJob to cancel. Format:
``projects/{project}/locations/{location}/pipelineJobs/{pipeline_job}``
"""
name = proto.Field(proto.STRING, number=1,)
__all__ = tuple(sorted(__protobuf__.manifest))
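# --- Usage sketch (illustrative only, not part of the generated API) -------
# A minimal illustration of constructing one of the request messages defined
# above. The project/location values are placeholders, and the AIP-160 filter
# string follows the field documentation on the ``filter`` attributes.
def _example_list_pipeline_jobs_request():
    return ListPipelineJobsRequest(
        parent="projects/my-project/locations/us-central1",
        filter='pipeline_name="test" AND create_time>"2020-05-18T13:30:00Z"',
        page_size=50,
    )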
| sasha-gitg/python-aiplatform | google/cloud/aiplatform_v1/types/pipeline_service.py | Python | apache-2.0 | 11,878 |
"""Creates a line spectrum and plots it
"""
import numpy as np
import hyperspy.api as hs
import matplotlib.pyplot as plt
# Create a line spectrum with random data
s = hs.signals.Spectrum(np.random.random((100, 1024)))
# Define the axis properties
s.axes_manager.signal_axes[0].name = 'Energy'
s.axes_manager.signal_axes[0].units = 'eV'
s.axes_manager.signal_axes[0].scale = 0.3
s.axes_manager.signal_axes[0].offset = 100
s.axes_manager.navigation_axes[0].name = 'time'
s.axes_manager.navigation_axes[0].units = 'fs'
s.axes_manager.navigation_axes[0].scale = 0.3
s.axes_manager.navigation_axes[0].offset = 100
# Give a title
s.metadata.General.title = 'Random line spectrum'
# Plot it
s.plot()
plt.show()  # Not necessary when running in HyperSpy's IPython profile
| to266/hyperspy | examples/data_navigation/line_spectrum.py | Python | gpl-3.0 | 774 |
#!/usr/bin/env python
from snmp_helper import snmp_get_oid, snmp_extract
# Beginning of setup
COMMUNITY_STRING = 'galileo'
RT1_SNMP_PORT = 7961
RT2_SNMP_PORT = 8061
IP = '50.76.53.27'
pynet_router1 = (IP, COMMUNITY_STRING, RT1_SNMP_PORT)
pynet_router2 = (IP, COMMUNITY_STRING, RT2_SNMP_PORT)
OID_sysName = "1.3.6.1.2.1.1.5.0"
OID_sysDescr = "1.3.6.1.2.1.1.1.0"
# End of setup
def print_router_info(label, router):
    snmp_sysName = snmp_get_oid(router, oid=OID_sysName)
    snmp_sysDescr = snmp_get_oid(router, oid=OID_sysDescr)
    sysName = snmp_extract(snmp_sysName)
    sysDescr = snmp_extract(snmp_sysDescr)
    print "%s: %s\nSystem name: %s\nSystem Description: %s" % \
        (label, router, sysName, sysDescr)
print_router_info("Router 1", pynet_router1)
print "\n========================"
print_router_info("Router 2", pynet_router2)
| philuu12/PYTHON_4_NTWK_ENGRS | wk2_hw/snmp_assign4.py | Python | apache-2.0 | 1,028 |
#!/usr/bin/env python3
import argparse
import sys
from PyQt5.QtWidgets import QApplication, QMainWindow
from basagc import config # need to import this first to set debug flag
if __name__ == "__main__":
# arg parser for debug flag
parser = argparse.ArgumentParser(description='basaGC: AGC for KSP')
parser.add_argument('-d','--debug', help='Set debug mode on', required=False, action='store_true')
args = parser.parse_args()
if args.debug:
config.DEBUG = True
config.current_log_level = "DEBUG"
print("================DEBUG MODE================")
from basagc import gui, computer # import the rest
app = QApplication(sys.argv)
main_window = QMainWindow()
ui = gui.GUI(main_window)
computer = computer.Computer(ui)
    main_window.setWindowTitle('basaGC')
main_window.show()
sys.exit(app.exec_())
| cashelcomputers/basaGC | basagc.py | Python | gpl-2.0 | 892 |
# Copyright 2013-2021 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class PyHyperframe(PythonPackage):
"""HTTP/2 framing layer for Python"""
homepage = "https://github.com/python-hyper/hyperframe/"
pypi = "hyperframe/hyperframe-6.0.0.tar.gz"
version('6.0.0', sha256='742d2a4bc3152a340a49d59f32e33ec420aa8e7054c1444ef5c7efff255842f1')
depends_on('py-setuptools', type='build')
depends_on('py-wheel', type='build')
| LLNL/spack | var/spack/repos/builtin/packages/py-hyperframe/package.py | Python | lgpl-2.1 | 592 |
"""
========================
Mack Chainladder Example
========================
This example demonstrates how you can use the Mack Chainladder method.
"""
import pandas as pd
import chainladder as cl
# Load the data
data = cl.load_sample('raa')
# Compute Mack Chainladder ultimates and Std Err using 'volume' average
mack = cl.MackChainladder()
dev = cl.Development(average='volume')
mack.fit(dev.fit_transform(data))
# Plotting
plot_data = mack.summary_.to_frame()
g = plot_data[['Latest', 'IBNR']].plot(
kind='bar', stacked=True, ylim=(0, None), grid=True,
    yerr=pd.DataFrame({'Latest': plot_data['Mack Std Err']*0,
'IBNR': plot_data['Mack Std Err']}),
title='Mack Chainladder Ultimate').set(
        xlabel='Accident Year', ylabel='Loss')
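# For orientation (illustrative; uses only objects defined above): the bars
# show the 'Latest' and 'IBNR' columns of the Mack summary, and 'Mack Std
# Err' supplies the error bars on the IBNR segment. Printing the frame makes
# the plotted quantities explicit:
print(plot_data[['Latest', 'IBNR', 'Mack Std Err']])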
| jbogaardt/chainladder-python | examples/plot_mack.py | Python | mit | 780 |
#-*- coding: utf-8 -*-
'''
Created on 25 Sep. 2010
@author: ivan
'''
import logging
import os
import re
import thread
from gi.repository import Gtk
from gi.repository import GLib
from foobnix.fc.fc import FC
from foobnix.playlists.pls_reader import update_id3_for_pls
from foobnix.util import const, idle_task
from foobnix.helpers.menu import Popup
from foobnix.util.bean_utils import get_bean_from_file
from foobnix.util.id3_util import update_id3
from foobnix.util.tag_util import edit_tags
from foobnix.util.converter import convert_files
from foobnix.util.audio import get_mutagen_audio
from foobnix.util.file_utils import open_in_filemanager, copy_to, get_files_from_gtk_selection_data,\
get_file_extension, is_playlist
from foobnix.util.localization import foobnix_localization
from foobnix.gui.treeview.common_tree import CommonTreeControl
from foobnix.util.key_utils import KEY_RETURN, is_key, KEY_DELETE, \
is_modificator
from foobnix.util.mouse_utils import is_double_left_click, \
is_rigth_click, right_click_optimization_for_trees, is_empty_click
from foobnix.playlists.m3u_reader import update_id3_for_m3u
foobnix_localization()
FLAG = False
class PlaylistTreeControl(CommonTreeControl):
def __init__(self, controls):
CommonTreeControl.__init__(self, controls)
self.header_pressed = False
self.menu = Popup()
self.tree_menu = Popup()
self.full_name = ""
self.label = Gtk.Label()
self.set_headers_visible(True)
self.set_headers_clickable(True)
self.set_reorderable(True)
"""Column icon"""
self.icon_col = Gtk.TreeViewColumn(None, Gtk.CellRendererPixbuf(), icon_name=self.play_icon[0])
self.icon_col.key = "*"
self.icon_col.set_sizing(Gtk.TreeViewColumnSizing.FIXED)
self.icon_col.set_fixed_width(32)
self.icon_col.set_min_width(32)
self.icon_col.label = Gtk.Label("*")
self._append_column(self.icon_col)
"""track number"""
self.trkn_col = Gtk.TreeViewColumn(None, Gtk.CellRendererText(), text=self.tracknumber[0])
self.trkn_col.key = "N"
self.trkn_col.set_clickable(True)
self.trkn_col.label = Gtk.Label("№")
self.trkn_col.label.show()
self.trkn_col.item = Gtk.CheckMenuItem(_("Number"))
self.trkn_col.set_widget(self.trkn_col.label)
self._append_column(self.trkn_col)
"""column composer"""
self.comp_col = Gtk.TreeViewColumn(None, Gtk.CellRendererText(), text=self.composer[0])
self.comp_col.key = "Composer"
self.comp_col.set_resizable(True)
self.comp_col.label = Gtk.Label(_("Composer"))
self.comp_col.item = Gtk.CheckMenuItem(_("Composer"))
self._append_column(self.comp_col)
"""column artist title"""
self.description_col = Gtk.TreeViewColumn(None, Gtk.CellRendererText(), text=self.text[0], font=self.font[0])
self.description_col.key = "Track"
self.description_col.set_resizable(True)
self.description_col.label = Gtk.Label(_("Track"))
self.description_col.item = Gtk.CheckMenuItem(_("Track"))
self._append_column(self.description_col)
"""column artist"""
self.artist_col = Gtk.TreeViewColumn(None, Gtk.CellRendererText(), text=self.artist[0])
self.artist_col.key = "Artist"
        self.artist_col.set_sizing(Gtk.TreeViewColumnSizing.AUTOSIZE)
self.artist_col.set_resizable(True)
self.artist_col.label = Gtk.Label(_("Artist"))
self.artist_col.item = Gtk.CheckMenuItem(_("Artist"))
self._append_column(self.artist_col)
"""column title"""
self.title_col = Gtk.TreeViewColumn(None, Gtk.CellRendererText(), text=self.title[0])
self.title_col.key = "Title"
        self.title_col.set_sizing(Gtk.TreeViewColumnSizing.AUTOSIZE)
self.title_col.set_resizable(True)
self.title_col.label = Gtk.Label(_("Title"))
self.title_col.item = Gtk.CheckMenuItem(_("Title"))
self._append_column(self.title_col)
"""column album"""
self.album_col = Gtk.TreeViewColumn(None, Gtk.CellRendererText(), text=self.album[0])
self.album_col.key = "Album"
if self.album_col.key not in FC().columns:
FC().columns[self.album_col.key] = [False, 7, 90]
        self.album_col.set_sizing(Gtk.TreeViewColumnSizing.AUTOSIZE)
self.album_col.set_resizable(True)
self.album_col.label = Gtk.Label(_("Album"))
self.album_col.item = Gtk.CheckMenuItem(_("Album"))
self._append_column(self.album_col)
"""column time"""
self.time_col = Gtk.TreeViewColumn(None, Gtk.CellRendererText(), text=self.time[0])
self.time_col.key = "Time"
self.time_col.label = Gtk.Label(_("Time"))
self.time_col.item = Gtk.CheckMenuItem(_("Time"))
self._append_column(self.time_col)
self.configure_send_drag()
self.configure_recive_drag()
self.set_playlist_plain()
self.connect("button-release-event", self.on_button_press)
self.on_load()
self.connect("columns-changed", self.on_columns_changed)
def set_playlist_tree(self):
self.rebuild_as_tree()
def set_playlist_plain(self):
self.rebuild_as_plain()
def on_key_release(self, w, e):
if is_modificator(e):
return
elif is_key(e, KEY_RETURN):
self.controls.play_selected_song()
elif is_key(e, KEY_DELETE):
self.delete_selected()
elif is_key(e, 'Left'):
self.controls.seek_down()
elif is_key(e, 'Right'):
self.controls.seek_up()
def get_bean_under_pointer_icon(self):
for row in self.model:
if row[self.play_icon[0]]:
bean = self.get_bean_from_row(row)
return bean
def common_single_random(self):
logging.debug("Repeat state " + str(FC().repeat_state))
if FC().repeat_state == const.REPEAT_SINGLE:
return self.get_current_bean_by_UUID()
if FC().is_order_random:
bean = self.get_random_bean()
self.set_play_icon_to_bean(bean)
return bean
def next(self):
bean = self.common_single_random()
if bean:
self.scroll_follow_play_icon()
return bean
bean = self.get_next_bean(FC().repeat_state == const.REPEAT_ALL)
if not bean:
self.controls.state_stop()
return
self.set_play_icon_to_bean(bean)
self.scroll_follow_play_icon()
logging.debug("Next bean " + str(bean) + bean.text)
return bean
def prev(self):
if FC().repeat_state == const.REPEAT_SINGLE:
return self.get_current_bean_by_UUID()
bean = self.get_prev_bean(FC().repeat_state == const.REPEAT_ALL)
if not bean:
self.controls.state_stop()
return
self.set_play_icon_to_bean(bean)
self.scroll_follow_play_icon()
return bean
@idle_task
def scroll_follow_play_icon(self):
        for row in self.model:
if row[self.play_icon[0]]:
start_path, end_path = self.get_visible_range()
path = row.path
if path >= end_path or path <= start_path:
self.scroll_to_cell(path)
def append(self, paths):
for i, path in enumerate(paths):
if os.path.isdir(path):
listdir = filter(lambda x: get_file_extension(x) in FC().all_support_formats or os.path.isdir(x),
[os.path.join(path, f) for f in os.listdir(path)])
for k, p in enumerate(listdir):
paths.insert(i + k + 1, p)
rows = self.file_paths_to_rows(paths)
if not rows:
return
#rows = self.playlist_filter(rows)
for row in rows:
self.model.append(None, row)
thread.start_new_thread(self.safe_fill_treerows, ())
def is_empty(self):
        return not self.model.get_iter_first()
def on_button_press(self, w, e):
if self.header_pressed:
self.header_pressed = False
return
if is_empty_click(w, e):
w.get_selection().unselect_all()
if is_double_left_click(e):
self.controls.play_selected_song()
if is_rigth_click(e):
right_click_optimization_for_trees(w, e)
beans = self.get_selected_beans()
if beans:
self.tree_menu.clear()
self.tree_menu.add_item(_('Play'), Gtk.STOCK_MEDIA_PLAY, self.controls.play_selected_song, None)
self.tree_menu.add_item(_('Delete from playlist'), Gtk.STOCK_DELETE, self.delete_selected, None)
paths = []
inet_paths = []
local_paths = []
for bean in beans:
if bean.path in paths:
continue
paths.append(bean.path)
if not bean.path or bean.path.startswith("http://"):
inet_paths.append(bean.path)
else:
local_paths.append(bean.path)
if local_paths:
self.tree_menu.add_item(_('Copy To...'), Gtk.STOCK_ADD, copy_to, local_paths)
self.tree_menu.add_item(_("Open in file manager"), None, open_in_filemanager, local_paths[0])
if inet_paths:
self.tree_menu.add_item(_('Download'), Gtk.STOCK_ADD,
self.controls.dm.append_tasks, self.get_all_selected_beans())
self.tree_menu.add_item(_('Download To...'), Gtk.STOCK_ADD,
self.controls.dm.append_tasks_with_dialog, self.get_all_selected_beans())
self.tree_menu.add_separator()
if local_paths:
self.tree_menu.add_item(_('Edit Tags'), Gtk.STOCK_EDIT, edit_tags, (self.controls, local_paths))
self.tree_menu.add_item(_('Format Converter'), Gtk.STOCK_CONVERT, convert_files, local_paths)
text = self.get_selected_bean().text
self.tree_menu.add_item(_('Copy To Search Line'), Gtk.STOCK_COPY,
self.controls.searchPanel.set_search_text, text)
self.tree_menu.add_separator()
self.tree_menu.add_item(_('Copy №-Title-Time'), Gtk.STOCK_COPY, self.copy_info_to_clipboard)
self.tree_menu.add_item(_('Copy Artist-Title-Album'), Gtk.STOCK_COPY,
self.copy_info_to_clipboard, True)
self.tree_menu.add_separator()
self.tree_menu.add_item(_('Love This Track(s) by Last.fm'), None,
self.controls.love_this_tracks, self.get_all_selected_beans())
self.tree_menu.add_item(_('Add to My Audio (VK)'), None,
self.controls.add_to_my_playlist, self.get_all_selected_beans())
self.tree_menu.add_item(_('Copy link'), None,
self.controls.copy_link, self.get_all_selected_beans())
self.tree_menu.show(e)
def on_click_header(self, w, e):
self.header_pressed = True
if is_rigth_click(e):
if "menu" in w.__dict__:
w.menu.show(e)
else:
self.menu.show(e)
def on_toggled_num(self, *a):
FC().numbering_by_order = not FC().numbering_by_order
number_music_tabs = self.controls.notetabs.get_n_pages() - 1
for page in xrange(number_music_tabs, -1, -1):
tab_content = self.controls.notetabs.get_nth_page(page)
pl_tree = tab_content.get_child()
if FC().numbering_by_order:
pl_tree.update_tracknumber()
pl_tree.num_order.set_active(True)
continue
pl_tree.num_tags.set_active(True)
for row in pl_tree.model:
if row[pl_tree.is_file[0]]:
audio = get_mutagen_audio(row[pl_tree.path[0]])
if audio and audio.has_key('tracknumber'):
row[pl_tree.tracknumber[0]] = re.search('\d*', audio['tracknumber'][0]).group()
if audio and audio.has_key('trkn'):
row[pl_tree.tracknumber[0]] = re.search('\d*', audio["trkn"][0]).group()
def on_toggle(self, w, e, column):
FC().columns[column.key][0] = not FC().columns[column.key][0]
number_music_tabs = self.controls.notetabs.get_n_pages() - 1
        atr_name = None
        for key in self.__dict__.keys():
            if self.__dict__[key] is column:
                atr_name = key
                break
        if atr_name is None:
            return
for page in xrange(number_music_tabs, -1, -1):
tab_content = self.controls.notetabs.get_nth_page(page)
pl_tree = tab_content.get_child()
            pl_tree_column = pl_tree.__dict__[atr_name]
if FC().columns[column.key][0]:
pl_tree.move_column_after(pl_tree_column, pl_tree.icon_col)
pl_tree_column.set_visible(True)
if self is not pl_tree:
pl_tree_column.item.set_active(True)
else:
pl_tree_column.set_visible(False)
if self is not pl_tree:
pl_tree_column.item.set_active(False)
def _append_column(self, column):
column.set_widget(column.label)
        if column.key in ['*', 'N', 'Time']:
            column.set_sizing(Gtk.TreeViewColumnSizing.AUTOSIZE)
        else:
            column.set_sizing(Gtk.TreeViewColumnSizing.FIXED)
if FC().columns[column.key][2] > 0:
column.set_fixed_width(FC().columns[column.key][2])
self.append_column(column)
column.button = column.label.get_parent().get_parent().get_parent()
column.button.connect("button-press-event", self.on_click_header)
'''
if column.key == 'N':
self.trkn_col.button.menu = Popup()
group = []
self.num_order = Gtk.RadioMenuItem.new_with_label(group, _("Numbering by order"))
self.num_order.connect("button-press-event", self.on_toggled_num)
group.append(self.num_order)
self.num_tags = Gtk.RadioMenuItem.new_with_label(group, _("Numbering by tags"))
self.num_tags.connect("button-press-event", self.on_toggled_num)
group.append(self.num_tags)
self.trkn_col.button.menu.append(self.num_order)
self.trkn_col.button.menu.append(self.num_tags)
if FC().numbering_by_order:
self.num_order.set_active(True)
else:
self.num_tags.set_active(True)
'''
def on_columns_changed(self, *a):
global FLAG
if FLAG:
return
FLAG = True
number_music_tabs = self.controls.notetabs.get_n_pages() - 1
for i, column in enumerate(self.get_columns()):
FC().columns[column.key][1] = i
if column.get_width() > 1: # to avoid recording of zero width in config
FC().columns[column.key][2] = column.get_width()
for page in xrange(number_music_tabs, 0, -1):
tab_content = self.controls.notetabs.get_nth_page(page)
pl_tree = tab_content.get_child()
col_list = pl_tree.get_columns()
col_list.sort(self.to_order_columns, reverse=True)
for column in col_list:
pl_tree.move_column_after(column, None)
FLAG = False
def to_order_columns(self, x, y):
return cmp(FC().columns[x.key][1], FC().columns[y.key][1])
def on_load(self):
col_list = self.get_columns()
col_list.sort(self.to_order_columns, reverse=True)
visible_columns = []
for column in col_list:
column.label.show()
column.set_widget(column.label)
column.set_clickable(True)
if column.key != "*":
column.set_reorderable(True)
if FC().columns[column.key][0]:
self.move_column_after(column, None)
if "item" in column.__dict__:
column.item.connect("button-press-event", self.on_toggle, column)
self.menu.append(column.item)
column.item.set_active(True)
visible_columns.append(column)
else:
if "item" in column.__dict__:
column.item.connect("button-press-event", self.on_toggle, column)
self.menu.append(column.item)
column.item.set_active(False)
column.set_visible(False)
'''if FC().columns["Track"][2] < 0:
self.description_col.set_fixed_width(self.get_allocation().width - (FC().columns["Time"][2]+70))'''
def change_rows_by_path(self, file_paths):
for treerow in self.model:
if treerow[self.is_file[0]] and treerow[self.path[0]] in file_paths:
bean = self.get_bean_from_row(treerow)
bean = update_id3(bean)
row_ref = Gtk.TreeRowReference.new(self.model, treerow.path)
self.fill_row(row_ref, bean)
GLib.idle_add(self.controls.notetabs.save_current_tab, priority=GLib.PRIORITY_LOW)
def file_paths_to_rows(self, paths):
result = []
for path in paths:
bean = get_bean_from_file(path)
beans = update_id3_for_m3u([bean])
beans = update_id3_for_pls(beans)
if beans and (len(beans) > 1 or is_playlist(bean.path)):
bean = bean.add_text(_('Playlist: ') + bean.text).add_font("bold").add_is_file(False)
bean.path = ''
beans.insert(0, bean)
for bean in beans:
result.append(self.get_row_from_bean(bean))
return result
def on_drag_data_received(self, treeview, context, x, y, selection, info, timestamp):
logging.debug('Playlist on_drag_data_received')
model = self.get_model().get_model()
drop_info = self.get_dest_row_at_pos(x, y)
if drop_info:
path, position = drop_info
iter = model.get_iter(path)
files = sorted([file for file in get_files_from_gtk_selection_data(selection)
if os.path.isdir(file) or get_file_extension(file) in FC().all_support_formats],
key=lambda x: x[self.text[0]])
if files:
'''dnd from the outside of the player'''
if self.is_empty():
if len(files) == 1 and os.path.isdir(files[0]):
tabname = os.path.basename(files[0])
else:
tabname = os.path.split(os.path.dirname(files[0]))[1]
self.controls.notetabs.rename_tab(self.scroll, tabname)
for i, file in enumerate(files):
if os.path.isdir(file):
sorted_dirs = []
sorted_files = []
for f in sorted(os.listdir(file), key=lambda x: x):
f = os.path.join(file, f)
if os.path.isdir(f):
sorted_dirs.append(f)
elif get_file_extension(f) in FC().all_support_formats:
sorted_files.append(f)
listdir = sorted_dirs + sorted_files
'''
listdir = sorted(filter(lambda x: get_file_extension(x) in FC().all_support_formats or os.path.isdir(x),
[os.path.join(file, f) for f in os.listdir(file)]), key=lambda x: x)
'''
for k, path in enumerate(listdir):
files.insert(i + k + 1, path)
rows = self.file_paths_to_rows(files)
if not rows:
return
rows = self.playlist_filter(rows)
for row in rows:
if drop_info:
                    if (position == Gtk.TreeViewDropPosition.BEFORE
                            or position == Gtk.TreeViewDropPosition.INTO_OR_BEFORE):
model.insert_before(None, iter, row)
else:
model.insert_after(None, iter, row)
iter = model.iter_next(iter)
else:
model.append(None, row)
else:
'''dnd inside the player'''
# ff - from_filter
ff_tree = Gtk.drag_get_source_widget(context)
ff_model, ff_paths = ff_tree.get_selection().get_selected_rows()
treerows = [ff_model[ff_path] for ff_path in ff_paths]
if self is ff_tree:
'''internal dnd'''
ff_row_refs = [Gtk.TreeRowReference.new(ff_model, ff_path) for ff_path in ff_paths]
for ff_row_ref in ff_row_refs:
ff_iter = self.get_iter_from_row_reference(ff_row_ref)
f_iter = ff_model.convert_iter_to_child_iter(ff_iter)
if drop_info:
                        if (position == Gtk.TreeViewDropPosition.BEFORE
                                or position == Gtk.TreeViewDropPosition.INTO_OR_BEFORE):
model.move_before(f_iter, iter)
else:
model.move_after(f_iter, iter)
iter = model.iter_next(iter)
else:
model.move_before(f_iter, None)
return
else:
'''dnd from other tree'''
if self.is_empty():
path = treerows[0][self.path[0]]
if path:
if len(treerows) == 1 and os.path.isdir(path):
tabname = os.path.basename(path)
else:
tabname = os.path.split(os.path.dirname(path))[1]
self.controls.notetabs.rename_tab(self.scroll, tabname)
else:
pass
for i, treerow in enumerate(treerows):
for k, ch_row in enumerate(treerow.iterchildren()):
treerows.insert(i + k + 1, ch_row)
#treerows = self.playlist_filter(treerows)
for i, treerow in enumerate(treerows):
if is_playlist(treerow[self.path[0]]):
rows = self.file_paths_to_rows([treerow[self.path[0]]])
if rows:
rows.reverse()
map(lambda row: treerows.insert(i + 1, row), rows)
continue
row = [col for col in treerow]
if drop_info:
                    if (position == Gtk.TreeViewDropPosition.BEFORE
                            or position == Gtk.TreeViewDropPosition.INTO_OR_BEFORE):
model.insert_before(None, iter, row)
else:
model.insert_after(None, iter, row)
iter = model.iter_next(iter)
else:
model.append(None, row)
thread.start_new_thread(self.safe_fill_treerows, ())
context.finish(True, False, timestamp)
self.stop_emission('drag-data-received')
return True
| sitexa/foobnix | foobnix/gui/treeview/playlist_tree.py | Python | gpl-3.0 | 24,000 |
"""Generic Limiter to ensure N parallel operations
.. note::
The limiter functionality is new.
Please report any issues found on `the retools Github issue
tracker <https://github.com/bbangert/retools/issues>`_.
The limiter is useful when you want to make sure that only N operations for a given process happen at the same time,
e.g., concurrent requests to the same domain.
The limiter works by acquiring and releasing limits.
Creating a limiter::
from retools.limiter import Limiter
def do_something():
limiter = Limiter(limit=10, prefix='my-operation') # using default redis connection
for i in range(100):
if limiter.acquire_limit('operation-%d' % i):
execute_my_operation()
limiter.release_limit('operation-%d' % i) # since we are releasing it synchronously
# all the 100 operations will be performed with
# one of them locked at a time
Specifying a default expiration in seconds::
def do_something():
limiter = Limiter(limit=10, expiration_in_seconds=45) # using default redis connection
Specifying a redis connection::
def do_something():
limiter = Limiter(limit=10, redis=my_redis_connection)
Every time you try to acquire a limit, the expired limits you previously acquired get removed from the set.
This way if your process dies in the mid of its operation, the keys will eventually expire.
"""
import time
import redis
from retools import global_connection
from retools.util import flip_pairs
class Limiter(object):
'''Configures and limits operations'''
def __init__(self, limit, redis=None, prefix='retools_limiter', expiration_in_seconds=10):
"""Initializes a Limiter.
:param limit: An integer that describes the limit on the number of items
:param redis: A Redis instance. Defaults to the redis instance
on the global_connection.
:param prefix: The default limit set name. Defaults to 'retools_limiter'.
:param expiration_in_seconds: The number in seconds that keys should be locked if not
explicitly released.
"""
self.limit = limit
self.redis = redis or global_connection.redis
self.prefix = prefix
self.expiration_in_seconds = expiration_in_seconds
def acquire_limit(self, key, expiration_in_seconds=None, retry=True):
"""Tries to acquire a limit for a given key. Returns True if the limit can be acquired.
:param key: A string with the key to acquire the limit for.
This key should be used when releasing.
:param expiration_in_seconds: The number in seconds that this key should be locked if not
explicitly released. If this is not passed, the default is used.
        :param retry: Internal parameter that specifies whether the operation should be retried.
Defaults to True.
"""
limit_available = self.redis.zcard(self.prefix) < self.limit
if limit_available:
self.__lock_limit(key, expiration_in_seconds)
return True
if retry:
self.redis.zremrangebyscore(self.prefix, '-inf', time.time())
return self.acquire_limit(key, expiration_in_seconds, retry=False)
return False
def release_limit(self, key):
"""Releases a limit for a given key.
:param key: A string with the key to release the limit on.
"""
self.redis.zrem(self.prefix, key)
def __lock_limit(self, key, expiration_in_seconds=None):
expiration = expiration_in_seconds or self.expiration_in_seconds
self.__zadd(self.prefix, key, time.time() + expiration)
def __zadd(self, set_name, *args, **kwargs):
"""
Custom ZADD interface that adapts to match the argument order of the currently
used backend. Using this method makes it transparent whether you use a Redis
or a StrictRedis connection.
Found this code at https://github.com/ui/rq-scheduler/pull/17.
"""
conn = self.redis
# If we're dealing with StrictRedis, flip each pair of imaginary
# (name, score) tuples in the args list
if conn.__class__ is redis.StrictRedis: # StrictPipeline is a subclass of StrictRedis, too
args = tuple(flip_pairs(args))
return conn.zadd(set_name, *args)
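# --- Usage sketch (illustrative only, not part of the library) -------------
# A small context-manager convenience around the acquire/release pair above,
# so callers cannot forget to release a limit. It assumes only the Limiter
# API defined in this module.
from contextlib import contextmanager
@contextmanager
def limited(limiter, key, expiration_in_seconds=None):
    """Yield True if the limit for `key` was acquired; release it on exit."""
    acquired = limiter.acquire_limit(key, expiration_in_seconds)
    try:
        yield acquired
    finally:
        if acquired:
            limiter.release_limit(key)
# Example:
#     limiter = Limiter(limit=10)
#     with limited(limiter, 'operation-1') as ok:
#         if ok:
#             execute_my_operation()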
| bbangert/retools | retools/limiter.py | Python | mit | 4,552 |
import sys
import numpy as np
import scipy as sp
import scipy.sparse
from openpnm.solvers import IterativeSolver
from openpnm.utils import logging
logger = logging.getLogger(__name__)
try:
import petsc4py
# Next line must be before importing PETSc
petsc4py.init(sys.argv)
from petsc4py import PETSc
except ModuleNotFoundError:
pass
__all__ = ['PETScLinearSolver']
class PETScLinearSolver(IterativeSolver):
r"""
Solves the sparse linear system Ax = b using petsc solvers.
Notes
-----
Parallel computing is supported and matrix partitioning over the
available cores is automatically handled by running:
.. code::
$ mpirun -np num_cores python script.py
where ``num_cores`` must be substituted with the number of cores.
"""
def _create_solver(self):
r"""
This method creates the petsc sparse linear solver.
"""
# https://petsc.org/release/docs/manualpages/KSP/KSPType.html
iterative = [
'richardson', 'chebyshev', 'cg', 'groppcg', 'pipecg', 'pipecgrr',
'cgne', 'nash', 'stcg', 'gltr', 'fcg', 'pipefcg', 'gmres',
'pipefgmres', 'fgmres', 'lgmres', 'dgmres', 'pgmres', 'tcqmr',
'bcgs', 'ibcgs', 'fbcgs', 'fbcgsr', 'bcgsl', 'pipebcgs', 'cgs',
'tfqmr', 'cr', 'pipecr', 'lsqr', 'preonly', 'qcg', 'bicg',
'minres', 'symmlq', 'lcd', 'python', 'gcr', 'pipegcr', 'tsirm',
'cgls', 'fetidp']
# https://petsc.org/release/docs/manualpages/PC/PCType.html
preconditioners = [
'none', 'jacobi', 'sor', 'lu', 'shell', 'bjacobi', 'mg',
'eisenstat', 'ilu', 'icc', 'asm', 'gasm', 'ksp', 'composite',
'redundant', 'spai', 'nn', 'cholesky', 'pbjacobi', 'mat', 'hypre',
'parms', 'fieldsplit', 'tfs', 'ml', 'galerkin', 'exotic', 'cp',
'bfbt', 'lsc', 'python', 'pfmg', 'syspfmg', 'redistribute', 'svd',
'gamg', 'sacusp', 'sacusppoly', 'bicgstabcusp', 'ainvcusp',
'chowiluviennacl', 'rowscalingviennacl', 'saviennacl', 'bddc',
'kaczmarz', 'telescope']
direct_lu = ['mumps', 'superlu_dist', 'umfpack', 'klu']
direct_cholesky = ['mumps', 'cholmod']
valid_solvers = iterative + direct_lu + direct_cholesky
solver = self.solver_type
preconditioner = self.preconditioner
if solver not in valid_solvers:
raise Exception(f"{solver} solver not availabe, choose another solver")
if preconditioner not in preconditioners:
raise Exception(f"{preconditioner} not found, choose another preconditioner")
self.ksp = PETSc.KSP()
self.ksp.create(PETSc.COMM_WORLD)
if solver in direct_lu:
self.ksp.getPC().setType('lu')
self.ksp.getPC().setFactorSolverType(solver)
self.ksp.setType('preonly')
elif solver in direct_cholesky:
self.ksp.getPC().setType('cholesky')
self.ksp.getPC().setFactorSolverType(solver)
self.ksp.setType('preonly')
elif solver in preconditioners:
self.ksp.getPC().setType(solver)
self.ksp.setType('preonly')
elif solver in iterative:
self.ksp.getPC().setType(preconditioner)
self.ksp.setType(solver)
def _set_tolerances(self, atol=None, rtol=None, maxiter=None):
r"""
Set absolute and relative tolerances, and maximum number of iterations.
"""
atol = self.atol if atol is None else atol
rtol = self.rtol if rtol is None else rtol
maxiter = self.maxiter if maxiter is None else maxiter
# BUG: PETSc misses rtol requirement by ~10-20X -> Report to petsc4py
self.ksp.setTolerances(atol=None, rtol=rtol/50, max_it=maxiter)
def _assemble_A(self):
r"""
This method creates the petsc sparse coefficients matrix from the
OpenPNM scipy one. The method also equally decomposes the matrix at
certain rows into different blocks (each block contains all the
columns) and distributes them over the pre-assigned cores for parallel
computing. The method can be used in serial.
"""
# Create a petsc sparse matrix
self.petsc_A = PETSc.Mat()
self.petsc_A.create(PETSc.COMM_WORLD)
self.petsc_A.setSizes([self.m, self.n])
self.petsc_A.setType('aij') # sparse
self.petsc_A.setUp()
# Loop over owned block of rows on this processor
# and insert entry values (for parallel computing).
self.Istart, self.Iend = self.petsc_A.getOwnershipRange()
# Assign values to the coefficients matrix from the scipy sparse csr
size_tmp = self.A.shape
# Row indices
csr1 = self.A.indptr[self.Istart:self.Iend+1] - self.A.indptr[self.Istart]
ind1 = self.A.indptr[self.Istart]
ind2 = self.A.indptr[self.Iend]
csr2 = self.A.indices[ind1:ind2] # column indices
csr3 = self.A.data[ind1:ind2] # data
self.petsc_A = PETSc.Mat().createAIJ(size=size_tmp,
csr=(csr1, csr2, csr3))
# Communicate off-processor values and setup internal data structures
# for performing parallel operations
self.petsc_A.assemblyBegin()
self.petsc_A.assemblyEnd()
def _assemble_b_and_x(self):
r"""
Initialize the solution vector (self.petsc_x), which is a dense
matrix (1D vector) and defines the rhs vector (self.petsc_b) from
the existing data.
"""
# Get vector(s) compatible with the matrix (same parallel layout)
# passing same communicator as the A matrix
# Global solution vector (all the local solutions will return to it)
self.petsc_s = PETSc.Vec()
self.petsc_s.create(PETSc.COMM_WORLD)
self.petsc_s.setSizes(self.m)
self.petsc_s.setFromOptions()
self.Istart, self.Iend = self.petsc_s.getOwnershipRange()
self.petsc_x = (self.petsc_s).duplicate()
self.petsc_b = (self.petsc_s).duplicate()
# Set the solution vector to zeros or the given initial guess (if any)
PETSc.Vec.setArray(self.petsc_x, self.x0[self.Istart: self.Iend])
# Define the petsc rhs vector from the numpy one
PETSc.Vec.setArray(self.petsc_b, self.b[self.Istart: self.Iend])
    def solve(self, A, b, x0=None, solver_type='cg', preconditioner='jacobi',
maxiter=None, atol=None, rtol=None):
r"""
Solves and returns the solution to the linear system, Ax = b.
This method converts the solution vector from a PETSc.Vec
instance to a numpy array, and finally destroys all the PETSc
objects to free memory.
Parameters
----------
A : csr_matrix
Coefficients matrix in Ax = b
b : ndarray
Right-hand-side vector in Ax = b
solver_type : str, optional
Default is the iterative solver 'cg' based on the
Conjugate Gradient method.
preconditioner: str, optional
Default is the 'jacobi' preconditioner, i.e., diagonal
scaling preconditioning. The preconditioner is used with
iterative solvers. When a direct solver is used, this
parameter is ignored.
Returns
-------
ndarray
The solution to Ax = b
Notes
-----
        Certain combinations of iterative solvers and preconditioners,
        or direct solvers and factorization types, are not supported.
The summary table of the different possibilities
can be found
`here <https://petsc.org/main/overview/linear_solve_table>`_
"""
self.b = b
self.A = sp.sparse.csr_matrix(A)
self.m, self.n = self.A.shape
self.x0 = np.zeros_like(self.b) if x0 is None else x0
self.solver_type = solver_type
        self.preconditioner = preconditioner
self.atol = self._get_atol(self.b)
self.rtol = self._get_rtol(self.x0)
self._assemble_b_and_x()
self._assemble_A()
self._create_solver()
self._set_tolerances(atol=atol, rtol=rtol, maxiter=maxiter)
self.ksp.setOperators(self.petsc_A)
self.ksp.setFromOptions()
# Solve the linear system
self.ksp.solve(self.petsc_b, self.petsc_x)
# Gather the solution to all processors
gather_to_0, self.petsc_s = PETSc.Scatter().toAll(self.petsc_x)
gather_to_0.scatter(self.petsc_x, self.petsc_s,
PETSc.InsertMode.INSERT, PETSc.ScatterMode.FORWARD)
# Convert solution vector from PETSc.Vec instance to a numpy array
self.solution = PETSc.Vec.getArray(self.petsc_s)
# Destroy petsc solver, coefficients matrix, rhs, and solution vectors
PETSc.KSP.destroy(self.ksp)
PETSc.Mat.destroy(self.petsc_A)
PETSc.Vec.destroy(self.petsc_b)
PETSc.Vec.destroy(self.petsc_x)
PETSc.Vec.destroy(self.petsc_s)
# FIXME: fetch exit_code somehow from petsc
exit_code = 0
return self.solution, exit_code
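# --- Usage sketch (illustrative only, not part of the class) ---------------
# Solves a tiny symmetric positive-definite system with the solver above.
# It assumes petsc4py is installed and that the IterativeSolver base class
# provides default tolerances when constructed with no arguments.
if __name__ == '__main__':
    A = sp.sparse.diags([2.0, 2.0, 2.0]).tocsr()  # simple 3x3 SPD matrix
    b = np.ones(3)
    x, exit_code = PETScLinearSolver().solve(A, b, solver_type='cg',
                                             preconditioner='jacobi')
    print(x)  # expected: [0.5, 0.5, 0.5]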
| PMEAL/OpenPNM | openpnm/solvers/_petsc.py | Python | mit | 9,448 |
import os
from setuptools import setup, find_packages
here = os.path.abspath(os.path.dirname(__file__))
with open(os.path.join(here, 'README.md')) as f:
README = f.read()
with open(os.path.join(here, 'CHANGES.txt')) as f:
CHANGES = f.read()
requires = [
'lxml',
]
setup(name='pyayml',
version='0.0',
description='pyayml',
long_description=README + '\n\n' + CHANGES,
classifiers=[
"Programming Language :: Python",
"Framework :: Pyramid",
"Topic :: Internet :: WWW/HTTP",
"Topic :: Internet :: WWW/HTTP :: WSGI :: Application",
],
author='',
author_email='',
url='',
keywords='yandex market',
packages=find_packages(),
include_package_data=True,
zip_safe=False,
install_requires=requires,
tests_require=requires,
test_suite="pyayml",
)
| aleksandr-rakov/pyayml | setup.py | Python | mit | 885 |
#!/usr/bin/env python
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
A factory class to build links
"""
from __future__ import absolute_import, division, print_function
class LinkFactory(object):
"""Static Factory class used by build `Link` objects.
The `Link` objects are registerd and accessed by
their appname data member
"""
_class_dict = {}
@staticmethod
def register(appname, cls):
"""Register a class with this factory """
LinkFactory._class_dict[appname] = cls
@staticmethod
def create(appname, **kwargs):
"""Create a `Link` of a particular class, using the kwargs as options"""
if appname in LinkFactory._class_dict:
return LinkFactory._class_dict[appname].create(**kwargs)
else:
raise KeyError(
"Could not create object associated to app %s" % appname)
| fermiPy/fermipy | fermipy/jobs/factory.py | Python | bsd-3-clause | 908 |
# Copyright (C) 2013 eNovance SAS <licensing@enovance.com>
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import contextlib
import webob.exc
from neutron.api import extensions
from neutron.common import config
from neutron import context
import neutron.extensions
from neutron.extensions import metering
from neutron.plugins.common import constants
from neutron.services.metering import metering_plugin
from neutron.tests.unit import test_db_plugin
DB_METERING_PLUGIN_KLASS = (
"neutron.services.metering."
"metering_plugin.MeteringPlugin"
)
extensions_path = ':'.join(neutron.extensions.__path__)
class MeteringPluginDbTestCaseMixin(object):
def _create_metering_label(self, fmt, name, description, **kwargs):
data = {'metering_label': {'name': name,
'tenant_id': kwargs.get('tenant_id',
'test-tenant'),
'shared': kwargs.get('shared', False),
'description': description}}
req = self.new_create_request('metering-labels', data,
fmt)
if kwargs.get('set_context') and 'tenant_id' in kwargs:
# create a specific auth context for this request
req.environ['neutron.context'] = (
context.Context('', kwargs['tenant_id'],
is_admin=kwargs.get('is_admin', True)))
return req.get_response(self.ext_api)
def _make_metering_label(self, fmt, name, description, **kwargs):
res = self._create_metering_label(fmt, name, description, **kwargs)
if res.status_int >= 400:
raise webob.exc.HTTPClientError(code=res.status_int)
return self.deserialize(fmt, res)
def _create_metering_label_rule(self, fmt, metering_label_id, direction,
remote_ip_prefix, excluded, **kwargs):
data = {'metering_label_rule':
{'metering_label_id': metering_label_id,
'tenant_id': kwargs.get('tenant_id', 'test-tenant'),
'direction': direction,
'excluded': excluded,
'remote_ip_prefix': remote_ip_prefix}}
req = self.new_create_request('metering-label-rules',
data, fmt)
if kwargs.get('set_context') and 'tenant_id' in kwargs:
# create a specific auth context for this request
req.environ['neutron.context'] = (
context.Context('', kwargs['tenant_id']))
return req.get_response(self.ext_api)
def _make_metering_label_rule(self, fmt, metering_label_id, direction,
remote_ip_prefix, excluded, **kwargs):
res = self._create_metering_label_rule(fmt, metering_label_id,
direction, remote_ip_prefix,
excluded, **kwargs)
if res.status_int >= 400:
raise webob.exc.HTTPClientError(code=res.status_int)
return self.deserialize(fmt, res)
@contextlib.contextmanager
def metering_label(self, name='label', description='desc',
fmt=None, **kwargs):
if not fmt:
fmt = self.fmt
metering_label = self._make_metering_label(fmt, name,
description, **kwargs)
yield metering_label
@contextlib.contextmanager
def metering_label_rule(self, metering_label_id=None, direction='ingress',
remote_ip_prefix='10.0.0.0/24',
excluded='false', fmt=None):
if not fmt:
fmt = self.fmt
metering_label_rule = self._make_metering_label_rule(fmt,
metering_label_id,
direction,
remote_ip_prefix,
excluded)
yield metering_label_rule
class MeteringPluginDbTestCase(test_db_plugin.NeutronDbPluginV2TestCase,
MeteringPluginDbTestCaseMixin):
fmt = 'json'
resource_prefix_map = dict(
(k.replace('_', '-'), constants.COMMON_PREFIXES[constants.METERING])
for k in metering.RESOURCE_ATTRIBUTE_MAP.keys()
)
def setUp(self, plugin=None):
service_plugins = {'metering_plugin_name': DB_METERING_PLUGIN_KLASS}
super(MeteringPluginDbTestCase, self).setUp(
plugin=plugin,
service_plugins=service_plugins
)
self.plugin = metering_plugin.MeteringPlugin()
ext_mgr = extensions.PluginAwareExtensionManager(
extensions_path,
{constants.METERING: self.plugin}
)
app = config.load_paste_app('extensions_test_app')
self.ext_api = extensions.ExtensionMiddleware(app, ext_mgr=ext_mgr)
class TestMetering(MeteringPluginDbTestCase):
def test_create_metering_label(self):
name = 'my label'
description = 'my metering label'
keys = [('name', name,), ('description', description)]
with self.metering_label(name, description) as metering_label:
for k, v, in keys:
self.assertEqual(metering_label['metering_label'][k], v)
def test_create_metering_label_shared(self):
name = 'my label'
description = 'my metering label'
shared = True
keys = [('name', name,), ('description', description),
('shared', shared)]
with self.metering_label(name, description,
shared=shared) as metering_label:
for k, v, in keys:
self.assertEqual(metering_label['metering_label'][k], v)
def test_delete_metering_label(self):
name = 'my label'
description = 'my metering label'
with self.metering_label(name, description) as metering_label:
metering_label_id = metering_label['metering_label']['id']
self._delete('metering-labels', metering_label_id, 204)
def test_list_metering_label(self):
name = 'my label'
description = 'my metering label'
with contextlib.nested(
self.metering_label(name, description),
self.metering_label(name, description)) as metering_label:
self._test_list_resources('metering-label', metering_label)
def test_create_metering_label_rule(self):
name = 'my label'
description = 'my metering label'
with self.metering_label(name, description) as metering_label:
metering_label_id = metering_label['metering_label']['id']
direction = 'egress'
remote_ip_prefix = '192.168.0.0/24'
excluded = True
keys = [('metering_label_id', metering_label_id),
('direction', direction),
('excluded', excluded),
('remote_ip_prefix', remote_ip_prefix)]
with self.metering_label_rule(metering_label_id,
direction,
remote_ip_prefix,
excluded) as label_rule:
for k, v, in keys:
self.assertEqual(label_rule['metering_label_rule'][k], v)
def test_delete_metering_label_rule(self):
name = 'my label'
description = 'my metering label'
with self.metering_label(name, description) as metering_label:
metering_label_id = metering_label['metering_label']['id']
direction = 'egress'
remote_ip_prefix = '192.168.0.0/24'
excluded = True
with self.metering_label_rule(metering_label_id,
direction,
remote_ip_prefix,
excluded) as label_rule:
rule_id = label_rule['metering_label_rule']['id']
self._delete('metering-label-rules', rule_id, 204)
def test_list_metering_label_rule(self):
name = 'my label'
description = 'my metering label'
with self.metering_label(name, description) as metering_label:
metering_label_id = metering_label['metering_label']['id']
direction = 'egress'
remote_ip_prefix = '192.168.0.0/24'
excluded = True
with contextlib.nested(
self.metering_label_rule(metering_label_id,
direction,
remote_ip_prefix,
excluded),
self.metering_label_rule(metering_label_id,
'ingress',
remote_ip_prefix,
excluded)) as metering_label_rule:
self._test_list_resources('metering-label-rule',
metering_label_rule)
def test_create_metering_label_rules(self):
name = 'my label'
description = 'my metering label'
with self.metering_label(name, description) as metering_label:
metering_label_id = metering_label['metering_label']['id']
direction = 'egress'
remote_ip_prefix = '192.168.0.0/24'
excluded = True
with contextlib.nested(
self.metering_label_rule(metering_label_id,
direction,
remote_ip_prefix,
excluded),
self.metering_label_rule(metering_label_id,
direction,
'0.0.0.0/0',
False)) as metering_label_rule:
self._test_list_resources('metering-label-rule',
metering_label_rule)
def test_create_overlap_metering_label_rules(self):
name = 'my label'
description = 'my metering label'
with self.metering_label(name, description) as metering_label:
metering_label_id = metering_label['metering_label']['id']
direction = 'egress'
remote_ip_prefix1 = '192.168.0.0/24'
remote_ip_prefix2 = '192.168.0.0/16'
excluded = True
with self.metering_label_rule(metering_label_id,
direction,
remote_ip_prefix1,
excluded):
res = self._create_metering_label_rule(self.fmt,
metering_label_id,
direction,
remote_ip_prefix2,
excluded)
self.assertEqual(webob.exc.HTTPConflict.code, res.status_int)
def test_create_metering_label_rule_two_labels(self):
name1 = 'my label 1'
name2 = 'my label 2'
description = 'my metering label'
with self.metering_label(name1, description) as metering_label1:
metering_label_id1 = metering_label1['metering_label']['id']
with self.metering_label(name2, description) as metering_label2:
metering_label_id2 = metering_label2['metering_label']['id']
direction = 'egress'
remote_ip_prefix = '192.168.0.0/24'
excluded = True
with contextlib.nested(
self.metering_label_rule(metering_label_id1,
direction,
remote_ip_prefix,
excluded),
self.metering_label_rule(metering_label_id2,
direction,
remote_ip_prefix,
excluded)) as metering_label_rule:
self._test_list_resources('metering-label-rule',
metering_label_rule)
| cloudbase/neutron-virtualbox | neutron/tests/unit/db/metering/test_db_metering.py | Python | apache-2.0 | 13,144 |
#!/usr/bin/python
import serial
import time
import random
s = None
num_leds = 72
play_time = 5.0
def flush_input():
s.flushInput()
def wait_for_ack():
while s.inWaiting() <= 0:
pass
s.read(s.inWaiting())
def command(cmd_text):
s.write((cmd_text + ':').encode())
wait_for_ack()
def setup():
global s
s = serial.Serial("/dev/ttyS0", 115200)
flush_input()
command("::pause:64:window:erase")
size_range_min = 1
size_range_max = 8
num_colors = 12
colors = [ "red", "orange", "yellow", "ltgreen", "green", "seafoam", "cyan", "ltblue", "blue", "purple", "magenta", "pink", "random" ]
effects = ['blink1','blink2','blink3','blink4','blink5','blink6']
effect_index = 0
def loop():
command("0,71:pshifto")
osize = random.randrange(size_range_min, size_range_max) + 1
    times = num_leds // osize
    midpoint = times // 2
effect_index = (midpoint + 1) % 6
if random.randrange(0, 2) == 0:
dir = 1
else:
dir = -1
for i in range(0, times):
effect = effects[effect_index]
        if i < (times // 2):
effect_index = (effect_index - dir) % 6
else:
effect_index = (effect_index + dir) % 6
color = colors[random.randrange(0, num_colors)]
command(str(osize) + "," + str(osize * (times - i)) + ":pshifto")
cmd = ""
cmd = cmd + str(osize * (times - i)) + ":window:"
cmd = cmd + color + ":" + effect + ":" + str(osize - 2) + ":repeat:black"
command(cmd)
command("72:window:continue:flush")
time.sleep(play_time)
command("pause")
if __name__ == '__main__':
setup()
while True:
loop()
| jhogsett/linkit | python/demo3.py | Python | mit | 1,964 |
# Copyright Contributors to the Pyro project.
# SPDX-License-Identifier: Apache-2.0
# Introduction
# ============
#
# This advanced Pyro tutorial demonstrates a number of inference and prediction
# tricks in the context of epidemiological models, specifically stochastic
# discrete time compartmental models with large discrete state spaces. This
# tutorial assumes the reader has completed all introductory tutorials and
# additionally the tutorials on enumeration and effect handlers (poutines):
# http://pyro.ai/examples/enumeration.html
# http://pyro.ai/examples/effect_handlers.html
import argparse
import logging
import math
import re
from collections import OrderedDict
import torch
import pyro
import pyro.distributions as dist
import pyro.distributions.hmm
import pyro.poutine as poutine
from pyro.infer import MCMC, NUTS, config_enumerate, infer_discrete
from pyro.infer.autoguide import init_to_value
from pyro.ops.special import safe_log
from pyro.ops.tensor_utils import convolve
from pyro.util import warn_if_nan
logging.basicConfig(format="%(message)s", level=logging.INFO)
# A Discrete SIR Model
# ====================
#
# Let's consider one of the simplest compartmental models: an SIR model
# https://en.wikipedia.org/wiki/Compartmental_models_in_epidemiology#The_SIR_model
# This models the dynamics of three groups within a population:
#
# population = Susceptible + Infected + Recovered
#
# At each discrete time step, each infected person infects a random number of
# susceptible people, and then randomly may recover. We noisily observe the
# number of people newly infected at each time step, assuming an unknown false
# negative rate, but no false positives. Our eventual objective is to estimate
# global model parameters R0 (the basic reproduction number), tau (the expected
# recovery time), and rho (the mean response rate = 1 - false negative rate).
# Having estimated these we will then estimate latent time series and forecast
# forward.
#
# We'll start by defining a discrete_model that uses a helper global_model to
# sample global parameters.
#
# Note we need to use ExtendedBinomial rather than Binomial because the data
# may lie outside of the predicted support. For these values,
# Binomial.log_prob() will error, whereas ExtendedBinomial.log_prob() will
# return -inf.
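# A minimal illustration of that support difference (a sketch for clarity;
# nothing in this script calls it):
def _demo_extended_binomial():
    d = dist.ExtendedBinomial(torch.tensor(5.0), 0.5)
    inside = d.log_prob(torch.tensor(3.0))   # finite log-probability
    outside = d.log_prob(torch.tensor(7.0))  # -inf instead of an error
    assert torch.isfinite(inside)
    assert outside == -math.inf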
def global_model(population):
tau = args.recovery_time # Assume this can be measured exactly.
R0 = pyro.sample("R0", dist.LogNormal(0.0, 1.0))
rho = pyro.sample("rho", dist.Uniform(0, 1))
# Convert interpretable parameters to distribution parameters.
rate_s = -R0 / (tau * population)
prob_i = 1 / (1 + tau)
return rate_s, prob_i, rho
def discrete_model(args, data):
# Sample global parameters.
rate_s, prob_i, rho = global_model(args.population)
# Sequentially sample time-local variables.
S = torch.tensor(args.population - 1.0)
I = torch.tensor(1.0)
for t, datum in enumerate(data):
S2I = pyro.sample("S2I_{}".format(t), dist.Binomial(S, -(rate_s * I).expm1()))
I2R = pyro.sample("I2R_{}".format(t), dist.Binomial(I, prob_i))
S = pyro.deterministic("S_{}".format(t), S - S2I)
I = pyro.deterministic("I_{}".format(t), I + S2I - I2R)
pyro.sample("obs_{}".format(t), dist.ExtendedBinomial(S2I, rho), obs=datum)
# We can use this model to simulate data. We'll use poutine.condition to pin
# parameter values and poutine.trace to record sample observations.
def generate_data(args):
logging.info("Generating data...")
params = {
"R0": torch.tensor(args.basic_reproduction_number),
"rho": torch.tensor(args.response_rate),
}
empty_data = [None] * (args.duration + args.forecast)
# We'll retry until we get an actual outbreak.
for attempt in range(100):
with poutine.trace() as tr:
with poutine.condition(data=params):
discrete_model(args, empty_data)
# Concatenate sequential time series into tensors.
obs = torch.stack(
[
site["value"]
for name, site in tr.trace.nodes.items()
if re.match("obs_[0-9]+", name)
]
)
S2I = torch.stack(
[
site["value"]
for name, site in tr.trace.nodes.items()
if re.match("S2I_[0-9]+", name)
]
)
assert len(obs) == len(empty_data)
obs_sum = int(obs[: args.duration].sum())
S2I_sum = int(S2I[: args.duration].sum())
if obs_sum >= args.min_observations:
logging.info(
"Observed {:d}/{:d} infections:\n{}".format(
obs_sum,
S2I_sum,
" ".join([str(int(x)) for x in obs[: args.duration]]),
)
)
return {"S2I": S2I, "obs": obs}
raise ValueError(
"Failed to generate {} observations. Try increasing "
"--population or decreasing --min-observations".format(args.min_observations)
)
# Inference
# =========
#
# While the above discrete_model is easy to understand, its discrete latent
# variables pose a challenge for inference. One of the most popular inference
# strategies for such models is Sequential Monte Carlo. However since Pyro and
# PyTorch are stronger in gradient based vectorizable inference algorithms, we
# will instead pursue inference based on Hamiltonian Monte Carlo (HMC).
#
# Our general inference strategy will be to:
# 1. Introduce auxiliary variables to make the model Markov.
# 2. Introduce more auxiliary variables to create a discrete parameterization.
# 3. Marginalize out all remaining discrete latent variables.
# 4. Vectorize to enable parallel-scan temporal filtering.
#
# Let's consider reparameterizing in terms of the variables (S, I) rather than
# (S2I, I2R). Since these may lead to inconsistent states, we need to replace
# the Binomial transition factors (S2I, I2R) with ExtendedBinomial.
#
# The following model is equivalent to the discrete_model:
@config_enumerate
def reparameterized_discrete_model(args, data):
# Sample global parameters.
rate_s, prob_i, rho = global_model(args.population)
# Sequentially sample time-local variables.
S_curr = torch.tensor(args.population - 1.0)
I_curr = torch.tensor(1.0)
for t, datum in enumerate(data):
# Sample reparameterizing variables.
# When reparameterizing to a factor graph, we ignored density via
# .mask(False). Thus distributions are used only for initialization.
S_prev, I_prev = S_curr, I_curr
S_curr = pyro.sample(
"S_{}".format(t), dist.Binomial(args.population, 0.5).mask(False)
)
I_curr = pyro.sample(
"I_{}".format(t), dist.Binomial(args.population, 0.5).mask(False)
)
# Now we reverse the computation.
S2I = S_prev - S_curr
I2R = I_prev - I_curr + S2I
pyro.sample(
"S2I_{}".format(t),
dist.ExtendedBinomial(S_prev, -(rate_s * I_prev).expm1()),
obs=S2I,
)
pyro.sample("I2R_{}".format(t), dist.ExtendedBinomial(I_prev, prob_i), obs=I2R)
pyro.sample("obs_{}".format(t), dist.ExtendedBinomial(S2I, rho), obs=datum)
# By reparameterizing, we have converted to coordinates that make the model
# Markov. We have also replaced dynamic integer_interval constraints with
# easier static integer_interval constraints (although we'll still need good
# initialization to avoid NANs). Since the discrete latent variables are
# bounded (by population size), we can enumerate out discrete latent variables
# and perform HMC inference over the global latents. However enumeration
# complexity is O(population^4), so this is only feasible for very small
# populations.
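# (Why O(population^4): each time step carries a joint state (S_t, I_t) with
# roughly population^2 possible values, and eliminating the chain multiplies
# transition factors over pairs of adjacent joint states, i.e.
# (population^2) x (population^2) work per step.)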
#
# Here is an inference approach using an MCMC sampler.
def infer_hmc_enum(args, data):
model = reparameterized_discrete_model
return _infer_hmc(args, data, model)
def _infer_hmc(args, data, model, init_values={}):
logging.info("Running inference...")
kernel = NUTS(
model,
full_mass=[("R0", "rho")],
max_tree_depth=args.max_tree_depth,
init_strategy=init_to_value(values=init_values),
jit_compile=args.jit,
ignore_jit_warnings=True,
)
# We'll define a hook_fn to log potential energy values during inference.
# This is helpful to diagnose whether the chain is mixing.
energies = []
def hook_fn(kernel, *unused):
e = float(kernel._potential_energy_last)
energies.append(e)
if args.verbose:
logging.info("potential = {:0.6g}".format(e))
mcmc = MCMC(
kernel,
hook_fn=hook_fn,
num_samples=args.num_samples,
warmup_steps=args.warmup_steps,
)
mcmc.run(args, data)
mcmc.summary()
if args.plot:
import matplotlib.pyplot as plt
plt.figure(figsize=(6, 3))
plt.plot(energies)
plt.xlabel("MCMC step")
plt.ylabel("potential energy")
plt.title("MCMC energy trace")
plt.tight_layout()
samples = mcmc.get_samples()
return samples
# To scale to large populations, we'll continue to reparameterize, this time
# replacing each of (S_aux,I_aux) with a combination of a bounded real
# variable and a Categorical variable with only four values.
#
# This is the crux: we can now perform HMC over the real variable and
# marginalize out the Categorical variables using variable elimination.
#
# We first define a helper to create enumerated Categorical sites.
def quantize(name, x_real, min, max):
"""
Randomly quantize in a way that preserves probability mass.
We use a piecewise polynomial spline of order 3.
"""
assert min < max
lb = x_real.detach().floor()
# This cubic spline interpolates over the nearest four integers, ensuring
# piecewise quadratic gradients.
s = x_real - lb
ss = s * s
t = 1 - s
tt = t * t
probs = (
torch.stack(
[
t * tt,
4 + ss * (3 * s - 6),
4 + tt * (3 * t - 6),
s * ss,
],
dim=-1,
)
* (1 / 6)
)
q = pyro.sample("Q_" + name, dist.Categorical(probs)).type_as(x_real)
x = lb + q - 1
x = torch.max(x, 2 * min - 1 - x)
x = torch.min(x, 2 * max + 1 - x)
return pyro.deterministic(name, x)
# Now we can define another equivalent model.
@config_enumerate
def continuous_model(args, data):
# Sample global parameters.
rate_s, prob_i, rho = global_model(args.population)
# Sample reparameterizing variables.
S_aux = pyro.sample(
"S_aux",
dist.Uniform(-0.5, args.population + 0.5)
.mask(False)
.expand(data.shape)
.to_event(1),
)
I_aux = pyro.sample(
"I_aux",
dist.Uniform(-0.5, args.population + 0.5)
.mask(False)
.expand(data.shape)
.to_event(1),
)
# Sequentially sample time-local variables.
S_curr = torch.tensor(args.population - 1.0)
I_curr = torch.tensor(1.0)
for t, datum in poutine.markov(enumerate(data)):
S_prev, I_prev = S_curr, I_curr
S_curr = quantize("S_{}".format(t), S_aux[..., t], min=0, max=args.population)
I_curr = quantize("I_{}".format(t), I_aux[..., t], min=0, max=args.population)
# Now we reverse the computation.
S2I = S_prev - S_curr
I2R = I_prev - I_curr + S2I
pyro.sample(
"S2I_{}".format(t),
dist.ExtendedBinomial(S_prev, -(rate_s * I_prev).expm1()),
obs=S2I,
)
pyro.sample("I2R_{}".format(t), dist.ExtendedBinomial(I_prev, prob_i), obs=I2R)
pyro.sample("obs_{}".format(t), dist.ExtendedBinomial(S2I, rho), obs=datum)
# Now all latent variables in the continuous_model are either continuous or
# enumerated, so we can use HMC. However we need to take special care with
# constraints because the above Markov reparameterization covers regions of
# hypothesis space that are infeasible (i.e. whose log_prob is -infinity). We
# thus heuristically initialize to a feasible point.
def heuristic_init(args, data):
"""Heuristically initialize to a feasible point."""
# Start with a single infection.
S0 = args.population - 1
# Assume 50% <= response rate <= 100%.
S2I = data * min(2.0, (S0 / data.sum()).sqrt())
S_aux = (S0 - S2I.cumsum(-1)).clamp(min=0.5)
# Account for the single initial infection.
S2I[0] += 1
# Assume infection lasts less than a month.
recovery = torch.arange(30.0).div(args.recovery_time).neg().exp()
I_aux = convolve(S2I, recovery)[: len(data)].clamp(min=0.5)
return {
"R0": torch.tensor(2.0),
"rho": torch.tensor(0.5),
"S_aux": S_aux,
"I_aux": I_aux,
}
def infer_hmc_cont(model, args, data):
init_values = heuristic_init(args, data)
return _infer_hmc(args, data, model, init_values=init_values)
# Our final inference trick is to vectorize. We can repurpose DiscreteHMM's
# implementation here, but we'll need to manually represent a Markov
# neighborhood of multiple Categorical of size 4 as single joint Categorical
# with 4 * 4 = 16 states, and then manually perform variable elimination (the
# factors here don't quite conform to DiscreteHMM's interface).
def quantize_enumerate(x_real, min, max):
"""
Randomly quantize in a way that preserves probability mass.
We use a piecewise polynomial spline of order 3.
"""
assert min < max
lb = x_real.detach().floor()
# This cubic spline interpolates over the nearest four integers, ensuring
# piecewise quadratic gradients.
s = x_real - lb
ss = s * s
t = 1 - s
tt = t * t
probs = (
torch.stack(
[
t * tt,
4 + ss * (3 * s - 6),
4 + tt * (3 * t - 6),
s * ss,
],
dim=-1,
)
* (1 / 6)
)
logits = safe_log(probs)
q = torch.arange(-1.0, 3.0)
x = lb.unsqueeze(-1) + q
x = torch.max(x, 2 * min - 1 - x)
x = torch.min(x, 2 * max + 1 - x)
return x, logits
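# A small self-check of the "preserves probability mass" claim (a sketch for
# clarity; nothing in this script calls it). Away from the boundary
# reflections, the four spline weights sum to one and their weighted mean of
# the candidate integers recovers x_real exactly.
def _demo_quantize_enumerate():
    x_real = torch.tensor([2.25, 5.5, 7.9])
    x, logits = quantize_enumerate(x_real, min=0, max=10)
    probs = logits.exp()
    assert torch.allclose(probs.sum(-1), torch.ones(3))
    assert torch.allclose((probs * x).sum(-1), x_real)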
def vectorized_model(args, data):
# Sample global parameters.
rate_s, prob_i, rho = global_model(args.population)
# Sample reparameterizing variables.
S_aux = pyro.sample(
"S_aux",
dist.Uniform(-0.5, args.population + 0.5)
.mask(False)
.expand(data.shape)
.to_event(1),
)
I_aux = pyro.sample(
"I_aux",
dist.Uniform(-0.5, args.population + 0.5)
.mask(False)
.expand(data.shape)
.to_event(1),
)
# Manually enumerate.
S_curr, S_logp = quantize_enumerate(S_aux, min=0, max=args.population)
I_curr, I_logp = quantize_enumerate(I_aux, min=0, max=args.population)
# Truncate final value from the right then pad initial value onto the left.
S_prev = torch.nn.functional.pad(
S_curr[:-1], (0, 0, 1, 0), value=args.population - 1
)
I_prev = torch.nn.functional.pad(I_curr[:-1], (0, 0, 1, 0), value=1)
# Reshape to support broadcasting, similar to EnumMessenger.
T = len(data)
Q = 4
S_prev = S_prev.reshape(T, Q, 1, 1, 1)
I_prev = I_prev.reshape(T, 1, Q, 1, 1)
S_curr = S_curr.reshape(T, 1, 1, Q, 1)
S_logp = S_logp.reshape(T, 1, 1, Q, 1)
I_curr = I_curr.reshape(T, 1, 1, 1, Q)
I_logp = I_logp.reshape(T, 1, 1, 1, Q)
data = data.reshape(T, 1, 1, 1, 1)
# Reverse the S2I,I2R computation.
S2I = S_prev - S_curr
I2R = I_prev - I_curr + S2I
# Compute probability factors.
S2I_logp = dist.ExtendedBinomial(S_prev, -(rate_s * I_prev).expm1()).log_prob(S2I)
I2R_logp = dist.ExtendedBinomial(I_prev, prob_i).log_prob(I2R)
obs_logp = dist.ExtendedBinomial(S2I, rho).log_prob(data)
# Manually perform variable elimination.
logp = S_logp + (I_logp + obs_logp) + S2I_logp + I2R_logp
logp = logp.reshape(-1, Q * Q, Q * Q)
logp = pyro.distributions.hmm._sequential_logmatmulexp(logp)
logp = logp.reshape(-1).logsumexp(0)
logp = logp - math.log(4) # Account for S,I initial distributions.
warn_if_nan(logp)
pyro.factor("obs", logp)
# We can fit vectorized_model exactly as we fit the original continuous_model,
# using our infer_hmc_cont helper. The vectorized model is more than an order
# of magnitude faster than the sequential version, and scales logarithmically
# in time (up to your machine's parallelism).
#
# After inference we have samples of all latent variables. Let's define a
# helper to examine the inferred posterior distributions.
def evaluate(args, samples):
# Print estimated values.
names = {"basic_reproduction_number": "R0", "response_rate": "rho"}
for name, key in names.items():
mean = samples[key].mean().item()
std = samples[key].std().item()
logging.info(
"{}: truth = {:0.3g}, estimate = {:0.3g} \u00B1 {:0.3g}".format(
key, getattr(args, name), mean, std
)
)
# Optionally plot histograms.
if args.plot:
import matplotlib.pyplot as plt
import seaborn as sns
fig, axes = plt.subplots(2, 1, figsize=(5, 5))
axes[0].set_title("Posterior parameter estimates")
for ax, (name, key) in zip(axes, names.items()):
truth = getattr(args, name)
sns.distplot(samples[key], ax=ax, label="posterior")
ax.axvline(truth, color="k", label="truth")
ax.set_xlabel(key + " = " + name.replace("_", " "))
ax.set_yticks(())
ax.legend(loc="best")
plt.tight_layout()
# Prediction and Forecasting
# ==========================
#
# So far we've written four models that each describe the same probability
# distribution. Each successive model made inference cheaper. Next let's move
# beyond inference and consider predicting latent infection rate and
# forecasting future infections.
#
# We'll use Pyro's effect handlers to combine multiple of the above models,
# leveraging the vectorized_model for inference, then the continuous_model to
# compute local latent variables, and finally the original discrete_model to
# forecast forward in time. Let's assume posterior samples have already been
# generated via infer_hmc_cont(vectorized_model, ...).
@torch.no_grad()
def predict(args, data, samples, truth=None):
logging.info("Forecasting {} steps ahead...".format(args.forecast))
particle_plate = pyro.plate("particles", args.num_samples, dim=-1)
# First we sample discrete auxiliary variables from the continuous
# variables sampled in vectorized_model. This samples only time steps
# [0:duration]. Here infer_discrete runs a forward-filter backward-sample
# algorithm. We'll add these new samples to the existing dict of samples.
model = poutine.condition(continuous_model, samples)
model = particle_plate(model)
model = infer_discrete(model, first_available_dim=-2)
with poutine.trace() as tr:
model(args, data)
samples = OrderedDict(
(name, site["value"])
for name, site in tr.trace.nodes.items()
if site["type"] == "sample"
)
# Next we'll run the forward generative process in discrete_model. This
# samples time steps [duration:duration+forecast]. Again we'll update the
# dict of samples.
extended_data = list(data) + [None] * args.forecast
model = poutine.condition(discrete_model, samples)
model = particle_plate(model)
with poutine.trace() as tr:
model(args, extended_data)
samples = OrderedDict(
(name, site["value"])
for name, site in tr.trace.nodes.items()
if site["type"] == "sample"
)
# Finally we'll concatenate the sequentially sampled values into contiguous
# tensors. This operates on the entire time interval [0:duration+forecast].
for key in ("S", "I", "S2I", "I2R"):
pattern = key + "_[0-9]+"
series = [value for name, value in samples.items() if re.match(pattern, name)]
assert len(series) == args.duration + args.forecast
series[0] = series[0].expand(series[1].shape)
samples[key] = torch.stack(series, dim=-1)
S2I = samples["S2I"]
median = S2I.median(dim=0).values
logging.info(
"Median prediction of new infections (starting on day 0):\n{}".format(
" ".join(map(str, map(int, median)))
)
)
# Optionally plot the latent and forecasted series of new infections.
if args.plot:
import matplotlib.pyplot as plt
plt.figure()
time = torch.arange(args.duration + args.forecast)
p05 = S2I.kthvalue(int(round(0.5 + 0.05 * args.num_samples)), dim=0).values
p95 = S2I.kthvalue(int(round(0.5 + 0.95 * args.num_samples)), dim=0).values
plt.fill_between(time, p05, p95, color="red", alpha=0.3, label="90% CI")
plt.plot(time, median, "r-", label="median")
plt.plot(time[: args.duration], data, "k.", label="observed")
if truth is not None:
plt.plot(time, truth, "k--", label="truth")
plt.axvline(args.duration - 0.5, color="gray", lw=1)
plt.xlim(0, len(time) - 1)
plt.ylim(0, None)
plt.xlabel("day after first infection")
plt.ylabel("new infections per day")
plt.title("New infections in population of {}".format(args.population))
plt.legend(loc="upper left")
plt.tight_layout()
return samples
# Experiments
# ===========
#
# Finally we'll define an experiment runner. For example we can simulate 60
# days of infection on a population of 10000 and forecast forward another 30
# days, and plot the results as follows (takes about 3 minutes on my laptop):
#
# python sir_hmc.py -p 10000 -d 60 -f 30 --plot
def main(args):
pyro.set_rng_seed(args.rng_seed)
dataset = generate_data(args)
obs = dataset["obs"][: args.duration]
# Choose among inference methods.
if args.enum:
samples = infer_hmc_enum(args, obs)
elif args.sequential:
samples = infer_hmc_cont(continuous_model, args, obs)
else:
samples = infer_hmc_cont(vectorized_model, args, obs)
# Evaluate fit.
evaluate(args, samples)
# Predict latent time series.
if args.forecast:
samples = predict(args, obs, samples, truth=dataset["S2I"])
return samples
if __name__ == "__main__":
assert pyro.__version__.startswith("1.7.0")
parser = argparse.ArgumentParser(description="SIR epidemiology modeling using HMC")
parser.add_argument("-p", "--population", default=10, type=int)
parser.add_argument("-m", "--min-observations", default=3, type=int)
parser.add_argument("-d", "--duration", default=10, type=int)
parser.add_argument("-f", "--forecast", default=0, type=int)
parser.add_argument("-R0", "--basic-reproduction-number", default=1.5, type=float)
parser.add_argument("-tau", "--recovery-time", default=7.0, type=float)
parser.add_argument("-rho", "--response-rate", default=0.5, type=float)
parser.add_argument(
"-e", "--enum", action="store_true", help="use the full enumeration model"
)
parser.add_argument(
"-s",
"--sequential",
action="store_true",
help="use the sequential continuous model",
)
parser.add_argument("-n", "--num-samples", default=200, type=int)
parser.add_argument("-w", "--warmup-steps", default=100, type=int)
parser.add_argument("-t", "--max-tree-depth", default=5, type=int)
parser.add_argument("-r", "--rng-seed", default=0, type=int)
parser.add_argument("--double", action="store_true")
parser.add_argument("--jit", action="store_true")
parser.add_argument("--cuda", action="store_true")
parser.add_argument("--verbose", action="store_true")
parser.add_argument("--plot", action="store_true")
args = parser.parse_args()
if args.double:
if args.cuda:
torch.set_default_tensor_type(torch.cuda.DoubleTensor)
else:
torch.set_default_tensor_type(torch.DoubleTensor)
elif args.cuda:
torch.set_default_tensor_type(torch.cuda.FloatTensor)
main(args)
if args.plot:
import matplotlib.pyplot as plt
plt.show()
| uber/pyro | examples/sir_hmc.py | Python | apache-2.0 | 24,446 |
from __future__ import with_statement
from nose.tools import assert_equal, assert_raises #@UnresolvedImport
from whoosh import analysis, highlight, fields, qparser, query
from whoosh.compat import u
from whoosh.filedb.filestore import RamStorage
_doc = u("alfa bravo charlie delta echo foxtrot golf hotel india juliet kilo lima")
def test_null_fragment():
terms = frozenset(("bravo", "india"))
sa = analysis.StandardAnalyzer()
nf = highlight.WholeFragmenter()
uc = highlight.UppercaseFormatter()
htext = highlight.highlight(_doc, terms, sa, nf, uc)
assert_equal(htext, "alfa BRAVO charlie delta echo foxtrot golf hotel INDIA juliet kilo lima")
def test_sentence_fragment():
text = u("This is the first sentence. This one doesn't have the word. This sentence is the second. Third sentence here.")
terms = ("sentence", )
sa = analysis.StandardAnalyzer(stoplist=None)
sf = highlight.SentenceFragmenter()
uc = highlight.UppercaseFormatter()
htext = highlight.highlight(text, terms, sa, sf, uc)
assert_equal(htext, "This is the first SENTENCE...This SENTENCE is the second...Third SENTENCE here")
def test_context_fragment():
terms = frozenset(("bravo", "india"))
sa = analysis.StandardAnalyzer()
cf = highlight.ContextFragmenter(surround=6)
uc = highlight.UppercaseFormatter()
htext = highlight.highlight(_doc, terms, sa, cf, uc)
assert_equal(htext, "alfa BRAVO charlie...hotel INDIA juliet")
def test_context_at_start():
terms = frozenset(["alfa"])
sa = analysis.StandardAnalyzer()
cf = highlight.ContextFragmenter(surround=15)
uc = highlight.UppercaseFormatter()
htext = highlight.highlight(_doc, terms, sa, cf, uc)
assert_equal(htext, "ALFA bravo charlie delta echo foxtrot")
def test_html_format():
terms = frozenset(("bravo", "india"))
sa = analysis.StandardAnalyzer()
cf = highlight.ContextFragmenter(surround=6)
hf = highlight.HtmlFormatter()
htext = highlight.highlight(_doc, terms, sa, cf, hf)
assert_equal(htext, 'alfa <strong class="match term0">bravo</strong> charlie...hotel <strong class="match term1">india</strong> juliet')
def test_html_escape():
terms = frozenset(["bravo"])
sa = analysis.StandardAnalyzer()
wf = highlight.WholeFragmenter()
hf = highlight.HtmlFormatter()
htext = highlight.highlight(u('alfa <bravo "charlie"> delta'), terms, sa, wf, hf)
assert_equal(htext, 'alfa <<strong class="match term0">bravo</strong> "charlie"> delta')
def test_maxclasses():
terms = frozenset(("alfa", "bravo", "charlie", "delta", "echo"))
sa = analysis.StandardAnalyzer()
cf = highlight.ContextFragmenter(surround=6)
hf = highlight.HtmlFormatter(tagname="b", termclass="t", maxclasses=2)
htext = highlight.highlight(_doc, terms, sa, cf, hf)
assert_equal(htext, '<b class="match t0">alfa</b> <b class="match t1">bravo</b> <b class="match t0">charlie</b>...<b class="match t1">delta</b> <b class="match t0">echo</b> foxtrot')
def test_workflow_easy():
schema = fields.Schema(id=fields.ID(stored=True),
title=fields.TEXT(stored=True))
ix = RamStorage().create_index(schema)
w = ix.writer()
w.add_document(id=u("1"), title=u("The man who wasn't there"))
w.add_document(id=u("2"), title=u("The dog who barked at midnight"))
w.add_document(id=u("3"), title=u("The invisible man"))
w.add_document(id=u("4"), title=u("The girl with the dragon tattoo"))
w.add_document(id=u("5"), title=u("The woman who disappeared"))
w.commit()
with ix.searcher() as s:
# Parse the user query
parser = qparser.QueryParser("title", schema=ix.schema)
q = parser.parse(u("man"))
r = s.search(q, terms=True)
assert_equal(len(r), 2)
r.fragmenter = highlight.WholeFragmenter()
r.formatter = highlight.UppercaseFormatter()
outputs = [hit.highlights("title") for hit in r]
assert_equal(outputs, ["The invisible MAN", "The MAN who wasn't there"])
def test_workflow_manual():
schema = fields.Schema(id=fields.ID(stored=True),
title=fields.TEXT(stored=True))
ix = RamStorage().create_index(schema)
w = ix.writer()
w.add_document(id=u("1"), title=u("The man who wasn't there"))
w.add_document(id=u("2"), title=u("The dog who barked at midnight"))
w.add_document(id=u("3"), title=u("The invisible man"))
w.add_document(id=u("4"), title=u("The girl with the dragon tattoo"))
w.add_document(id=u("5"), title=u("The woman who disappeared"))
w.commit()
with ix.searcher() as s:
# Parse the user query
parser = qparser.QueryParser("title", schema=ix.schema)
q = parser.parse(u("man"))
# Extract the terms the user used in the field we're interested in
terms = [text for fieldname, text in q.all_terms()
if fieldname == "title"]
# Perform the search
r = s.search(q)
assert_equal(len(r), 2)
# Use the same analyzer as the field uses. To be sure, you can
# do schema[fieldname].analyzer. Be careful not to do this
# on non-text field types such as DATETIME.
analyzer = schema["title"].analyzer
# Since we want to highlight the full title, not extract fragments,
# we'll use WholeFragmenter.
nf = highlight.WholeFragmenter()
# In this example we'll simply uppercase the matched terms
fmt = highlight.UppercaseFormatter()
outputs = []
for d in r:
text = d["title"]
outputs.append(highlight.highlight(text, terms, analyzer, nf, fmt))
assert_equal(outputs, ["The invisible MAN", "The MAN who wasn't there"])
def test_unstored():
schema = fields.Schema(text=fields.TEXT, tags=fields.KEYWORD)
ix = RamStorage().create_index(schema)
w = ix.writer()
w.add_document(text=u("alfa bravo charlie"), tags=u("delta echo"))
w.commit()
hit = ix.searcher().search(query.Term("text", "bravo"))[0]
assert_raises(KeyError, hit.highlights, "tags")
def test_multifilter():
iwf_for_index = analysis.IntraWordFilter(mergewords=True, mergenums=False)
iwf_for_query = analysis.IntraWordFilter(mergewords=False, mergenums=False)
mf = analysis.MultiFilter(index=iwf_for_index, query=iwf_for_query)
ana = analysis.RegexTokenizer() | mf | analysis.LowercaseFilter()
schema = fields.Schema(text=fields.TEXT(analyzer=ana, stored=True))
ix = RamStorage().create_index(schema)
w = ix.writer()
w.add_document(text=u("Our BabbleTron5000 is great"))
w.commit()
with ix.searcher() as s:
hit = s.search(query.Term("text", "5000"))[0]
assert_equal(hit.highlights("text"), 'Our BabbleTron<b class="match term0">5000</b> is great')
def test_pinpoint():
domain = u("alfa bravo charlie delta echo foxtrot golf hotel india juliet "
"kilo lima mike november oskar papa quebec romeo sierra tango")
schema = fields.Schema(text=fields.TEXT(stored=True, chars=True))
ix = RamStorage().create_index(schema)
w = ix.writer()
w.add_document(text=domain)
w.commit()
assert ix.schema["text"].supports("characters")
with ix.searcher() as s:
r = s.search(query.Term("text", "juliet"), terms=True)
hit = r[0]
hi = highlight.Highlighter()
hi.formatter = highlight.UppercaseFormatter()
assert not hi.can_load_chars(r, "text")
assert_equal(hi.highlight_hit(hit, "text"), "golf hotel india JULIET kilo lima mike november")
hi.fragmenter = highlight.PinpointFragmenter()
assert hi.can_load_chars(r, "text")
assert_equal(hi.highlight_hit(hit, "text"), "ot golf hotel india JULIET kilo lima mike nove")
hi.fragmenter.autotrim = True
assert_equal(hi.highlight_hit(hit, "text"), "golf hotel india JULIET kilo lima mike")
def test_highlight_wildcards():
schema = fields.Schema(text=fields.TEXT(stored=True))
ix = RamStorage().create_index(schema)
with ix.writer() as w:
w.add_document(text=u("alfa bravo charlie delta cookie echo"))
with ix.searcher() as s:
qp = qparser.QueryParser("text", ix.schema)
q = qp.parse(u("c*"))
r = s.search(q)
assert_equal(r.scored_length(), 1)
r.formatter = highlight.UppercaseFormatter()
hit = r[0]
assert_equal(hit.highlights("text"), "alfa bravo CHARLIE delta COOKIE echo")
| mzdaniel/oh-mainline | vendor/packages/whoosh/tests/test_highlighting.py | Python | agpl-3.0 | 8,649 |
# -*- coding: utf-8 -*-
import re
from threading import Timer
import sublime
import sublime_plugin
from .console_logging import getLogger
from .daemon import ask_daemon
from .utils import get_settings, is_python_scope, is_repl
logger = getLogger(__name__)
FOLLOWING_CHARS = {"\r", "\n", "\t", " ", ")", "]", ";", "}", "\x00"}
PLUGIN_ONLY_COMPLETION = (
sublime.INHIBIT_WORD_COMPLETIONS |
sublime.INHIBIT_EXPLICIT_COMPLETIONS
)
def debounce(wait):
""" Decorator that will postpone a functions
execution until after wait seconds
have elapsed since the last time it was invoked. """
def decorator(fn):
def debounced(*args, **kwargs):
def call_it():
fn(*args, **kwargs)
try:
debounced.t.cancel()
            except AttributeError:
pass
debounced.t = Timer(wait, call_it)
debounced.t.start()
return debounced
return decorator
@debounce(0.2)
def debounced_ask_daemon(*args, **kwargs):
ask_daemon(*args, **kwargs)
class SublimeJediParamsAutocomplete(sublime_plugin.TextCommand):
"""
Function / Class constructor autocompletion command
"""
def run(self, edit, characters='('):
"""
Insert completion character, and complete function parameters
if possible
:param edit: sublime.Edit
:param characters: str
"""
self._insert_characters(edit, characters, ')')
if get_settings(self.view)['complete_funcargs']:
ask_daemon(
self.view,
self.show_template,
'funcargs',
location=self.view.sel()[0].end()
)
@property
def auto_match_enabled(self):
""" check if sublime closes parenthesis automaticly """
return self.view.settings().get('auto_match_enabled', True)
def _insert_characters(self, edit, open_pair, close_pair):
"""
Insert autocomplete character with closed pair
and update selection regions
        If the Sublime option `auto_match_enabled` is turned on, the behavior is:
when none selection
`( => (<caret>)`
`<caret>1 => ( => (<caret>1`
when text selected
`text => (text<caret>)`
In other case:
when none selection
`( => (<caret>`
when text selected
`text => (<caret>`
:param edit: sublime.Edit
        :param open_pair: str
        :param close_pair: str
"""
regions = [a for a in self.view.sel()]
self.view.sel().clear()
for region in reversed(regions):
next_char = self.view.substr(region.begin())
# replace null byte to prevent error
next_char = next_char.replace('\x00', '\n')
logger.debug("Next characters: {0}".format(next_char))
following_text = next_char not in FOLLOWING_CHARS
logger.debug("Following text: {0}".format(following_text))
if self.auto_match_enabled:
self.view.insert(edit, region.begin(), open_pair)
position = region.end() + 1
# IF selection is non-zero
            # OR there is no text after the cursor and the selection is empty
# THEN insert closing pair
if region.size() > 0 or not following_text and region.size() == 0:
self.view.insert(edit, region.end() + 1, close_pair)
position += (len(open_pair) - 1)
else:
self.view.replace(edit, region, open_pair)
position = region.begin() + len(open_pair)
self.view.sel().add(sublime.Region(position, position))
def show_template(self, view, template):
view.run_command('insert_snippet', {"contents": template})
class Autocomplete(sublime_plugin.ViewEventListener):
"""Sublime Text autocompletion integration."""
_completions = []
_previous_completions = []
_last_location = None
def __enabled(self):
settings = get_settings(self.view)
if sublime.active_window().active_view().id() != self.view.id():
return False
if is_repl(self.view) and not settings['enable_in_sublime_repl']:
logger.debug("JEDI does not complete in SublimeREPL views.")
return False
if not is_python_scope(self.view, self.view.sel()[0].begin()):
logger.debug('JEDI completes only in python scope.')
return False
return True
def on_post_text_command(self, command, args):
"""Complete call arguments of a just committed function."""
if command != 'commit_completion' or not self.__enabled():
return
location = self.view.sel()[0]
# do not autocomplete on import lines
line = self.view.substr(self.view.line(location)).split()
if 'import' in line:
return
committed = self.view.substr(self.view.word(location))
for display, insert in self._completions:
if committed == insert and display.endswith('\tfunction'):
self.view.run_command('sublime_jedi_params_autocomplete')
break
def on_query_completions(self, prefix, locations):
"""Sublime autocomplete event handler.
Get completions depends on current cursor position and return
them as list of ('possible completion', 'completion type')
:param prefix: string for completions
:type prefix: basestring
:param locations: offset from beginning
:type locations: int
:return: list of tuple(str, str)
"""
if not self.__enabled():
return False
logger.info('JEDI completion triggered.')
settings = get_settings(self.view)
if settings['only_complete_after_regex']:
previous_char = self.view.substr(locations[0] - 1)
if not re.match(settings['only_complete_after_regex'], previous_char): # noqa
return False
if self._last_location != locations[0]:
self._last_location = locations[0]
debounced_ask_daemon(
self.view,
self._receive_completions,
'autocomplete',
location=locations[0],
)
return [], PLUGIN_ONLY_COMPLETION
if self._last_location == locations[0]:
self._last_location = None
return self._completions
def _receive_completions(self, view, completions):
if not completions:
return
logger.debug("Completions: {0}".format(completions))
self._previous_completions = self._completions
self._completions = completions
if (completions and (
not view.is_auto_complete_visible() or
not self._is_completions_subset())):
only_jedi_completion = (
get_settings(self.view)['sublime_completions_visibility']
in ('default', 'jedi')
)
view.run_command('hide_auto_complete')
view.run_command('auto_complete', {
'api_completions_only': only_jedi_completion,
'disable_auto_insert': True,
'next_completion_if_showing': False,
})
def _is_completions_subset(self):
completions = {completion for _, completion in self._completions}
previous = {completion for _, completion in self._previous_completions}
return completions.issubset(previous)
| srusskih/SublimeJEDI | sublime_jedi/completion.py | Python | mit | 7,646 |
# -*- coding: utf-8 -*-
import os
from os.path import join as opj
import sys
import cherrypy
#from ws4py.server.cherrypyserver import WebSocketPlugin, WebSocketTool
#from ws4py.websocket import WebSocket
import threading
from babel.support import Translations
import locale
import jinja2
from jinja2 import Environment, FileSystemLoader
from webtools.ajax import is_xhr
#from webtools.templateddlforms import *
from webtools.angular import TriangleUndefined, angular_filter
env = Environment(loader=FileSystemLoader(''),extensions=['jinja2.ext.i18n','jinja2.ext.with_','jinja2.ext.do','jinja2.ext.loopcontrols','jinja2.ext.autoescape'])
env.undefined = TriangleUndefined
env.filters['angular'] = angular_filter
env.globals['pylist'] = __builtins__.list
env.globals['pydict'] = __builtins__.dict
translations = Translations.load('translations', locale.getdefaultlocale())
env.install_gettext_translations(translations)
#WebSocketPlugin(cherrypy.engine).subscribe()
#cherrypy.tools.websocket = WebSocketTool()
class Root(object):
def __init__(self):
pwd = os.getcwd()
CONFIG_FILE = opj(pwd,'conf/webserver.conf')
from optparse import OptionParser
from webconfig import webconfigManager
parser = OptionParser(version = '1.0', description = 'Web Server of global system resource planned')
parser.add_option('-c','--config',type='string', dest = 'config_file',help ='Config file', default = CONFIG_FILE)
options,arguments=parser.parse_args()
        if 'config_file' in options.__dict__:
            if options.__dict__['config_file'] != CONFIG_FILE:
                CONFIG_FILE = options.__dict__['config_file']
webconfig = webconfigManager(CONFIG_FILE)
if webconfig['globals']['mode'] == 'external':
import local
from remote.webdispatcher import RemoteDispatcher as Dispatcher
self._dispatcher = Dispatcher(managers=local.manager.MetaManager.__list_managers__, gsrp_root=webconfig['globals']['gsrp_root'], mode=webconfig['globals']['mode'], service=webconfig['globals']['interface'],options=webconfig[webconfig['globals']['interface']])
#self._dispatcher._connect(webconfig['globals']['interface'], webconfig[webconfig['globals']['interface']])
else:
sys.path.insert(1,webconfig['globals']['gsrp_root'])
from remote.webdispatcher import RemoteDispatcher as Dispatcher
import local
import managers
self._dispatcher = Dispatcher(dict(list(managers.manager.MetaManager.__list_managers__.items()) + list(local.manager.MetaManager.__list_managers__.items())),webconfig['globals']['gsrp_root'],'internal')
self._dispatcher.env = env
@cherrypy.expose
def index(self):
if cherrypy.request.method == "GET":
#print('headers:',cherrypy.request.headers)
print('Logging:',self._dispatcher._execute(['web.db.auth.Login'],{'database':'tst','user':'postgres','password':'admin','host':'localhost','port':5432}))
return env.get_template("web/index.html").render()
@cherrypy.expose
@cherrypy.tools.gzip()
@cherrypy.tools.json_in()
@cherrypy.tools.json_out()
def auth(self):
if cherrypy.request.method == "POST":
RLOCK = threading.RLock()
RLOCK.acquire()
if is_xhr():
jq = cherrypy.request.json
area = jq['args'][0].split('.')[0]
#print('headers:',cherrypy.request.headers)
#print('JQ: %s' % jq)
if area == 'auth':
if 'args' in jq and 'kwargs' in jq:
return self._dispatcher._execute(jq[u'args'], jq[u'kwargs'])
elif 'args' in jq:
return self._dispatcher._execute(jq[u'args'])
else:
raise AttributeError
elif area == 'app':
                    cherrypy.lib.cptools.redirect(url='/web', internal=True, debug=False)
elif area == 'dbm':
                    cherrypy.lib.cptools.redirect(url='/db', internal=True, debug=False)
else:
raise KeyError('Area must be in %s present %s' % ("'app','auth','dbm'",area))
else:
print('HEADERS:',cherrypy.request.headers)
print('JSON:',cherrypy.request.json)
RLOCK.release()
@cherrypy.expose
@cherrypy.tools.gzip()
@cherrypy.tools.json_in()
@cherrypy.tools.json_out()
def db(self):
if cherrypy.request.method == "POST":
RLOCK = threading.RLock()
RLOCK.acquire()
if is_xhr():
jq = cherrypy.request.json
area = jq['args'][0].split('.')[0]
#print('headers:',cherrypy.request.headers)
#print('JQ: %s' % jq)
if area == 'dbm':
if 'args' in jq and 'kwargs' in jq:
return self._dispatcher._execute(jq[u'args'], jq[u'kwargs'])
elif 'args' in jq:
return self._dispatcher._execute(jq[u'args'])
else:
raise AttributeError
elif area == 'app':
                    cherrypy.lib.cptools.redirect(url='/web', internal=True, debug=False)
elif area == 'auth':
                    cherrypy.lib.cptools.redirect(url='/auth', internal=True, debug=False)
else:
raise KeyError('Area must be in %s present %s' % ("'app','auth','dbm'",area))
else:
print('HEADERS:',cherrypy.request.headers)
print('JSON:',cherrypy.request.json)
RLOCK.release()
@cherrypy.expose
@cherrypy.tools.gzip()
@cherrypy.tools.json_in()
@cherrypy.tools.json_out()
def web(self):
if cherrypy.request.method == "POST":
RLOCK = threading.RLock()
RLOCK.acquire()
if is_xhr():
jq = cherrypy.request.json
area = jq['args'][0].split('.')[0]
#print('headers:',cherrypy.request.headers)
#print('JQ: %s' % jq)
if area == 'app':
if 'args' in jq and 'kwargs' in jq:
return self._dispatcher._execute(jq[u'args'], jq[u'kwargs'])
elif 'args' in jq:
return self._dispatcher._execute(jq[u'args'])
else:
raise AttributeError
elif area == 'auth':
                    cherrypy.lib.cptools.redirect(url='/auth', internal=True, debug=False)
elif area == 'dbm':
                    cherrypy.lib.cptools.redirect(url='/db', internal=True, debug=False)
else:
raise KeyError('Area must be in %s present %s' % ("'app','auth','dbm'",area))
else:
print('HEADERS:',cherrypy.request.headers)
print('JSON:',cherrypy.request.json)
RLOCK.release()
def secureheaders():
headers = cherrypy.response.headers
headers['X-Frame-Options'] = 'DENY'
headers['X-XSS-Protection'] = '1; mode=block'
#headers['Content-Security-Policy'] = "default-src 'self'; script-src 'self'; connect-src 'self'; img-src 'self'; style-src 'unsafe-inline';font-src 'self'"
if (cherrypy.server.ssl_certificate != None and cherrypy.server.ssl_private_key != None):
headers['Strict-Transport-Security'] = 'max-age=31536000'
# set the priority according to your needs if you are hooking something
# else on the 'before_finalize' hook point.
cherrypy.tools.secureheaders = cherrypy.Tool('before_request_body', secureheaders, priority=60)
if __name__ == "__main__":
cherrypy.config.update("server.conf")
#cherrypy.config.update({"tools.websocket.on": True,"tools.websocket.handler_cls": WebSocket})
#print('config:',cherrypy.config)
cherrypy.quickstart(root=Root(),config="app.conf")
| NikolayChesnokov/webgsrp3 | webserver.py | Python | agpl-3.0 | 6,854 |
# This component generates test points within a zone and calculates view factors of each of these points to the other surfaces of the zone.
#
# Honeybee: A Plugin for Environmental Analysis (GPL) started by Mostapha Sadeghipour Roudsari
#
# This file is part of Honeybee.
#
# Copyright (c) 2013-2020, Chris Mackey <Chris@MackeyArchitecture.com>
# Honeybee is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published
# by the Free Software Foundation; either version 3 of the License,
# or (at your option) any later version.
#
# Honeybee is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Honeybee; If not, see <http://www.gnu.org/licenses/>.
#
# @license GPL-3.0+ <http://spdx.org/licenses/GPL-3.0+>
"""
Use this component to generate test points within a zone and calculate the view factor from each of these points to the other surfaces of the zone as well as the sky.
_
This component is a necessary step before creating an thermal map of an energy model.
-
Provided by Honeybee 0.0.66
Args:
_HBZones: The HBZones out of any of the HB components that generate or alter zones. Note that these should ideally be the zones that are fed into the Run Energy Simulation component as surfaces may not align otherwise. Zones read back into Grasshopper from the Import idf component will not align correctly with the EP Result data.
gridSize_: A number in Rhino model units to make each cell of the view factor mesh.
distFromFloorOrSrf_: A number in Rhino model units to set the distance of the view factor mesh from the floor of the zones. Alternatively, this can be a surface or list of surfaces on which you are interested in studying thermal comfort. Lastly, it can be a list of points at which you want to evaluate microclimate conditions.
additionalShading_: Add additional shading breps or meshes to account for geometry that is not a part of the zone but can still block direct sunlight to occupants. Examples include outdoor context shading and indoor furniture.
addShdTransmiss_: An optional transmissivity that will be used for all of the objects connected to the additionalShading_ input. This can also be a list of transmissivities whose length matches the number of breps connected to additionalShading_ input, which will assign a different transmissivity to each object. Lastly, this input can also accept a data tree with a number of branches equal to the number of objects connected to the additionalShading_ input with a number of values in each branch that march the number of hours in the simulated analysisPeriod (so, for an annual simulation, each branch would have 8760 values). The default is set to assume that all additionalShading_ objects are completely opaque. As one adds in transmissivities with this input, the calculation time will increase accordingly.
============: ...
    viewResolution_: An integer between 0 and 4 to set the number of times that the Tregenza sky patches are split. A higher number will ensure greater accuracy but will take longer. The default is set to 0 for a quick calculation.
    removeAirWalls_: Set to "True" to remove air walls from the view factor calculation. The default is set to "True" since you usually want to remove air walls from your view factor calculations.
    includeOutdoor_: Set to 'True' to have the final visualization take the parts of the input surface that are outdoors and color them with temperatures representative of outdoor conditions. Note that these colors only approximate outdoor conditions, showing the assumptions of the energy model rather than a perfectly accurate representation of the outdoors. The default is set to 'False' as including outdoor conditions can often increase the calculation time.
============: ...
parallel_: Set to "True" to run the calculation with multiple cores and "False" to run it with a single core. Multiple cores can increase the speed of the calculation substantially and is recommended if you are not running other big or important processes. The default is set to "True."
_buildMesh: Set boolean to "True" to generate a mesh based on your zones and the input distFromFloorOrSrf_ and gridSize_. This is a necessary step before calculating view factors from each test point to the surrounding zone surfaces.
_runIt: Set boolean to "True" to run the component and calculate viewFactors from each test point to surrounding surfaces.
Returns:
readMe!: ...
==========: ...
viewFactorMesh: A data tree of meshes to be plugged into the "Annual Comfort Analysis Recipe" component.
viewFactorInfo: A list of python data that carries essential numerical information for the Comfort Analysis Workflow, including the view factors from each test point to a zone's surfaces, the sky view factors of the test points, and information related to window plaement, used to estimate stratification in the zone. This should be plugged into a "Comfort Analysis Recipe" component.
==========: ...
testPts: The test points, which lie in the center of the mesh faces at which comfort parameters are being evaluated.
        viewMeshFaces: A data tree of breps representing the split mesh faces of the view factor mesh.
zoneWireFrame: A list of curves representing the outlines of the zones. This is particularly helpful if you want to see the outline of the building in relation to the temperature and comfort maps that you might produce off of these results.
viewVectors: The vectors that were used to caclulate the view factor (note that these will increase as the viewResolution increases).
shadingContext: A list of meshes representing the opaque surfaces of the zone. These are what were used to determine the sky view factor and the direct sun falling on occupants.
closedAirVolumes: The closed Breps representing the zones of continuous air volume (when air walls are excluded). Zones within the same breps will have the stratification calculation done together.
"""
ghenv.Component.Name = "Honeybee_Indoor View Factor Calculator"
ghenv.Component.NickName = 'IndoorViewFactor'
ghenv.Component.Message = 'VER 0.0.66\nJUL_07_2020'
ghenv.Component.IconDisplayMode = ghenv.Component.IconDisplayMode.application
ghenv.Component.Category = "HB-Legacy"
ghenv.Component.SubCategory = "10 | Energy | Energy"
#compatibleHBVersion = VER 0.0.56\nDEC_15_2017
#compatibleLBVersion = VER 0.0.59\nJUN_25_2015
try: ghenv.Component.AdditionalHelpFromDocStrings = "6"
except: pass
from System import Object
from System import Drawing
import Grasshopper.Kernel as gh
from Grasshopper import DataTree
from Grasshopper.Kernel.Data import GH_Path
import Rhino as rc
import rhinoscriptsyntax as rs
import scriptcontext as sc
import operator
import System.Threading.Tasks as tasks
import time
w = gh.GH_RuntimeMessageLevel.Warning
tol = sc.doc.ModelAbsoluteTolerance
def copyHBZoneData():
#Make a check for the zones.
checkZones = True
#Calls the zones and the libraries from the hive.
hb_hive = sc.sticky["honeybee_Hive"]()
hb_EPMaterialAUX = sc.sticky["honeybee_EPMaterialAUX"]()
    #Make lists to be filled
surfaceNames = []
srfBreps = []
zoneBreps = []
zoneCentPts = []
srfTypes = []
srfInteriorList = []
zoneNames = []
zoneNatVentArea = []
zoneVolumes = []
srfAirWallAdjList = []
windowSrfTransmiss = []
srfInteriorWindowList = []
srfIntWindowAdjList = []
modelHasIntWindows = False
zoneFloorReflect = []
zoneRoofReflect = []
for zoneCount, HZone in enumerate(_HBZones):
#Append lists to be filled.
surfaceNames.append([])
srfBreps.append([])
srfTypes.append([])
srfInteriorList.append([])
srfAirWallAdjList.append([])
srfInteriorWindowList.append([])
srfIntWindowAdjList.append([])
windowSrfTransmiss.append([])
zoneFloorReflect.append([])
zoneBreps.append(HZone)
zoneCentPts.append(HZone.GetBoundingBox(False).Center)
#Copy some of the basic zone properties.
zone = hb_hive.visualizeFromHoneybeeHive([HZone])[0]
zoneNames.append(zone.name)
zoneNatVentArea.append(zone.windowOpeningArea)
zoneVolumes.append(zone.getZoneVolume())
#Copy surface properties, including the adjacencies.
for srf in zone.surfaces:
surfaceNames[zoneCount].append(srf.name)
srfTypes[zoneCount].append(srf.type)
if srf.BC.lower() == "surface":
if srf.type == 4:
srfInteriorList[zoneCount].append(str(srf.BCObject).split('\n')[0].split(': ')[-1])
srfAirWallAdjList[zoneCount].append(str(srf.BCObject.parent).split('\n')[0].split(': ')[-1])
srfInteriorWindowList[zoneCount].append(str(srf.BCObject).split('\n')[0].split(': ')[-1])
srfIntWindowAdjList[zoneCount].append(str(srf.BCObject.parent).split('\n')[0].split(': ')[-1])
else:
srfInteriorList[zoneCount].append(None)
srfAirWallAdjList[zoneCount].append(str(srf.BCObject.parent).split('\n')[0].split(': ')[-1])
srfInteriorWindowList[zoneCount].append(None)
srfIntWindowAdjList[zoneCount].append(str(srf.BCObject.parent).split('\n')[0].split(': ')[-1])
else:
srfInteriorList[zoneCount].append(None)
srfAirWallAdjList[zoneCount].append(None)
srfInteriorWindowList[zoneCount].append(None)
srfIntWindowAdjList[zoneCount].append(None)
if srf.hasChild:
srfBreps[zoneCount].append(srf.punchedGeometry)
windowSrfTransmiss[zoneCount].append(0)
for srfCount, childSrf in enumerate(srf.childSrfs):
srfTypes[zoneCount].append(childSrf.type)
surfaceNames[zoneCount].append(childSrf.name)
srfBreps[zoneCount].append(childSrf.geometry)
#Calculate the transmissivity of the window from the construction material properties.
windowCnstr = childSrf.EPConstruction
if windowCnstr == None:
if srf.BC.lower() == "surface": floorCnstr = 'INTERIOR WINDOW'
else: floorCnstr = 'EXTERIOR WINDOW'
windowLayers = hb_EPMaterialAUX.decomposeEPCnstr(windowCnstr.upper())[0]
winTrans = 1
for layer in windowLayers:
propNumbers = hb_EPMaterialAUX.decomposeMaterial(layer.upper(), ghenv.Component)[0]
if 'WindowMaterial:Glazing' in propNumbers[0]:
try:
winTrans = winTrans*float(propNumbers[4])
except:
winTrans = 0.4
elif 'WindowMaterial:SimpleGlazingSystem' in propNumbers[0]:
winTrans = winTrans*float(propNumbers[2])
windowSrfTransmiss[zoneCount].append(winTrans)
if srf.BC.lower() == "surface":
modelHasIntWindows = True
srfInteriorList[zoneCount].append(None)
srfAirWallAdjList[zoneCount].append(None)
srfInteriorWindowList[zoneCount].append(str(srf.BCObject).split('\n')[0].split(': ')[-1])
srfIntWindowAdjList[zoneCount].append(str(srf.BCObject.parent).split('\n')[0].split(': ')[-1])
else:
srfInteriorList[zoneCount].append(None)
srfAirWallAdjList[zoneCount].append(None)
srfInteriorWindowList[zoneCount].append(None)
srfIntWindowAdjList[zoneCount].append(None)
zoneFloorReflect[zoneCount].append(None)
zoneRoofReflect.append(None)
else:
srfBreps[zoneCount].append(srf.geometry)
windowSrfTransmiss[zoneCount].append(0)
if srf.type == 2 or srf.type == 2.25 or srf.type == 2.5 or srf.type == 2.75 or srf.type == 1 or srf.type == 1.5:
floorCnstr = srf.EPConstruction
if floorCnstr == None:
if srf.type == 2 or srf.type == 2.25 or srf.type == 2.5 or srf.type == 2.75: floorCnstr = 'INTERIOR FLOOR'
else: floorCnstr = 'EXTERIOR ROOF'
floorInnerMat = hb_EPMaterialAUX.decomposeEPCnstr(floorCnstr.upper())[0][-1]
propNumbers = hb_EPMaterialAUX.decomposeMaterial(floorInnerMat.upper(), ghenv.Component)[0]
if 'Material:NoMass' in propNumbers[0]:
solRef = 1 - float(propNumbers[4])
elif 'Material' in propNumbers[0]:
try:
solRef = 1 - float(propNumbers[7])
except:
solRef = 0.5
if srf.type == 1 or srf.type == 1.5:
zoneRoofReflect.append(solRef)
zoneFloorReflect[zoneCount].append(None)
else:
zoneFloorReflect[zoneCount].append(solRef)
zoneRoofReflect.append(None)
else:
zoneFloorReflect[zoneCount].append(None)
zoneRoofReflect.append(None)
zoneFloorReflect.append(zoneRoofReflect)
#Change the list of adjacent zones to be based on the list item of the zone instead of the name of the zone.
def changeName2Num(theAdjNameList):
adjNumList = []
for srfListCount, zoneSrfList in enumerate(theAdjNameList):
adjNumList.append([])
for surface in zoneSrfList:
foundZone = False
for zoneCount, zoneName in enumerate(zoneNames):
if surface == zoneName:
adjNumList[srfListCount].append(zoneCount)
foundZone = True
if foundZone == False:
adjNumList[srfListCount].append(None)
return adjNumList
srfAirWallAdjNumList = changeName2Num(srfAirWallAdjList)
srfIntWindowAdjNumList = changeName2Num(srfIntWindowAdjList)
sc.sticky["Honeybee_ViewFacotrSrfData"] = [zoneBreps, surfaceNames, srfBreps, zoneCentPts, srfTypes, srfInteriorList, zoneNames, zoneNatVentArea, zoneVolumes, srfAirWallAdjNumList, checkZones, windowSrfTransmiss, modelHasIntWindows, srfInteriorWindowList, srfIntWindowAdjNumList, zoneFloorReflect]
def checkTheInputs():
#Check to make sure that all connected zones are closed breps.
checkData4 = True
if _HBZones != []:
for closedZone in _HBZones:
if closedZone.IsSolid: pass
else: checkData4 = False
if checkData4 == False:
warning = "One or more of your connected HBZones is not a closed brep. Zones must be closed in order to run this component correctly."
print warning
ghenv.Component.AddRuntimeMessage(w, warning)
#Check the grid size and set a default based on the size of each zone if nothing is connected.
rhinoModelUnits = str(sc.doc.ModelUnitSystem)
checkData1 = False
gridSize = None
if gridSize_ == None:
if _HBZones != []:
checkData1 = True
dimensions = []
for zone in _HBZones:
zoneBBox = rc.Geometry.Box(zone.GetBoundingBox(rc.Geometry.Plane.WorldXY))
dimensions.append(zoneBBox.X[1] - zoneBBox.X[0])
dimensions.append(zoneBBox.Y[1] - zoneBBox.Y[0])
dimensions.append(zoneBBox.Z[1] - zoneBBox.Z[0])
dimensions.sort()
shortestDim = dimensions[0]
gridSize = shortestDim/5
gridSzStatement = "No value connected for gridSize_. A default gridsize of " + str(gridSize) + " " + rhinoModelUnits + " has been chosen based on the dimensions of your zone geometry."
print gridSzStatement
elif gridSize_ > 0:
checkData1 = True
gridSize = gridSize_
gridSzStatement = "Gridsize has been set to " + str(gridSize) + " " + rhinoModelUnits + "."
print gridSzStatement
else:
warning = "gridSize_ must be a value that is creater than 0."
print warning
ghenv.Component.AddRuntimeMessage(w, warning)
#Check the distFromFloorOrSrf_ and set a default based on the rhino model units if the user has not connected anything.
sectionBreps = []
if distFromFloorOrSrf_:
try:
floatTest = float(distFromFloorOrSrf_[0])
sectionMethod = 0
except:
try:
try:
                    rc.Geometry.Point3d(distFromFloorOrSrf_[0])
                    sectionMethod = 2
except:
points = [rs.coerce3dpoint(x) for x in distFromFloorOrSrf_]
if points[0] is not None:
sectionMethod = 2
else:
sectionMethod = 1
except:
sectionMethod = 1
else: sectionMethod = 0
if sectionMethod == 0:
if distFromFloorOrSrf_:
distFromFloor = floatTest
else:
if rhinoModelUnits == 'Meters':
distFromFloor = 0.9
elif rhinoModelUnits == 'Centimeters':
distFromFloor = 90
elif rhinoModelUnits == 'Millimeters':
distFromFloor = 900
elif rhinoModelUnits == 'Feet':
distFromFloor = 3
elif rhinoModelUnits == 'Inches':
distFromFloor = 72
else:
distFromFloor = 0.1
print "No value connected for distFromFloorOrSrf_. The distance from the floor has been set to " + str(distFromFloor) + " " + rhinoModelUnits + "."
elif sectionMethod == 1:
distFromFloor = None
sectionMesh, sectionBreps = lb_preparation.cleanAndCoerceList(distFromFloorOrSrf_)
else:
distFromFloor = None
sectionBreps = [distFromFloorOrSrf_]
#Check to be sure that none of the zones are having the temperature map generated above them.
checkData2 = True
if _HBZones != []:
if sectionMethod == 0: pass
else:
for zone in _HBZones:
zoneBBox = rc.Geometry.Box(zone.GetBoundingBox(rc.Geometry.Plane.WorldXY))
zDist = zoneBBox.Z[1] - zoneBBox.Z[0]
if zDist > distFromFloor: pass
else: checkData2 = False
if checkData2 == False:
warning = "The distFromFloorOrSrf_ is greater than the height of one or more of the zones. Try decreaseing the value or connecting a custom surface."
print warning
ghenv.Component.AddRuntimeMessage(w, warning)
else: checkData2 = False
#Check the viewResolution.
checkData3 = True
if viewResolution_ == None:
viewResolution = 0
print "View resolution has been set to 0 for a fast calculation."
else:
if viewResolution_ <= 4 and viewResolution_ >= 0:
viewResolution = viewResolution_
print "Sky resolution set to " + str(viewResolution)
else:
checkData3 = False
warning = 'viewResolution_ must be a value between 0 and 4.'
print warning
ghenv.Component.AddRuntimeMessage(w, warning)
#Check the additionalShading_ and addShdTransmiss_.
checkData5 = True
addShdTransmiss = []
constantTransmis = True
if addShdTransmiss_.BranchCount > 0:
if addShdTransmiss_.BranchCount == 1 and not len(addShdTransmiss_.Branch(0)) == 8760:
addShdTransmissInit = []
for transmiss in addShdTransmiss_.Branch(0):
addShdTransmissInit.append(transmiss)
if len(addShdTransmissInit) == len(additionalShading_):
allGood = True
for transVal in addShdTransmissInit:
transFloat = transVal
if transFloat <= 1.0 and transFloat >= 0.0: addShdTransmiss.append(transFloat)
else: allGood = False
if allGood == False:
checkData5 = False
warning = 'addShdTransmiss_ must be a value between 0 and 1.'
print warning
ghenv.Component.AddRuntimeMessage(gh.GH_RuntimeMessageLevel.Warning, warning)
elif len(addShdTransmissInit) == 1:
if addShdTransmissInit[0] <= 1.0 and addShdTransmissInit[0] >= 0.0:
for count in range(len(additionalShading_)):
addShdTransmiss.append(addShdTransmissInit[0])
else:
checkData5 = False
warning = 'addShdTransmiss_ must be a value between 0 and 1.'
print warning
ghenv.Component.AddRuntimeMessage(gh.GH_RuntimeMessageLevel.Warning, warning)
else:
checkData5 = False
warning = 'addShdTransmiss_ must be either a list of values that correspond to the number of breps in the additionalShading_ input or a single constant value for all additionalShading_ objects.'
print warning
ghenv.Component.AddRuntimeMessage(gh.GH_RuntimeMessageLevel.Warning, warning)
elif addShdTransmiss_.BranchCount > 1 or len(addShdTransmiss_.Branch(0)) == 8760:
if addShdTransmiss_.BranchCount == len(additionalShading_):
constantTransmis = False
for i in range(addShdTransmiss_.BranchCount):
branchList = addShdTransmiss_.Branch(i)
dataVal = []
for item in branchList:
dataVal.append(item)
addShdTransmiss.append(dataVal)
else:
checkData5 = False
warning = 'addShdTransmiss_ data trees must have a number of branches that equals the number of objects in additionalShading_.'
print warning
ghenv.Component.AddRuntimeMessage(gh.GH_RuntimeMessageLevel.Warning, warning)
else:
print "no value connected for addShdTransmiss_. All breps connected to additionalShading_ will be assumed to be completely opaque,"
#Check the removeAirWalls_ option.
if removeAirWalls_ == None: removeInt = True
else: removeInt = removeAirWalls_
#Check the includeOutdoor_ option.
if includeOutdoor_ == None: includeOutdoor = False
else: includeOutdoor = includeOutdoor_
#Do a final check of everything.
if checkData1 == True and checkData2 == True and checkData3 == True and checkData4 == True and checkData5 == True:
checkData = True
else: checkData = False
return checkData, gridSize, distFromFloor, viewResolution, removeInt, sectionMethod, sectionBreps, includeOutdoor, constantTransmis, addShdTransmiss
def createMesh(brep, gridSize):
## mesh breps
def makeMeshFromSrf(i, inputBrep):
try:
mesh[i] = rc.Geometry.Mesh.CreateFromBrep(inputBrep, meshParam)[0]
inputBrep.Dispose()
except:
print 'Error in converting Brep to Mesh...'
pass
# prepare bulk list for each surface
mesh = [None] * len(brep)
# set-up mesh parameters for each surface based on surface size
meshParam = rc.Geometry.MeshingParameters.Default
meshParam.MaximumEdgeLength = gridSize
meshParam.MinimumEdgeLength = gridSize
meshParam.GridAspectRatio = 1
for i in range(len(mesh)): makeMeshFromSrf(i, brep[i])
return mesh
def createMeshFromPoints(points, gridSize):
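#Build a square test face of gridSize x gridSize centered on each point, then mesh and collect all of the faces.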
pointBreps = []
pointMesh = rc.Geometry.Mesh()
initPts = [rc.Geometry.Point3d(-gridSize/2, gridSize/2, 0),
rc.Geometry.Point3d(gridSize/2, gridSize/2, 0),
rc.Geometry.Point3d(gridSize/2, -gridSize/2, 0),
rc.Geometry.Point3d(-gridSize/2, -gridSize/2, 0)]
try:
points = [rs.coerce3dpoint(x) for x in points]
except:
pass
for pt in points:
brepInit = rc.Geometry.Brep.CreateFromCornerPoints(initPts[0], initPts[1], initPts[2], initPts[3], sc.doc.ModelAbsoluteTolerance)
moveTrans = rc.Geometry.Transform.Translation(pt.X, pt.Y, pt.Z)
brepInit.Transform(moveTrans)
pointBreps.append(brepInit)
meshedBrep = rc.Geometry.Mesh.CreateFromBrep(brepInit)[0]
pointMesh.Append(meshedBrep)
return pointMesh, pointBreps, points
def constructNewMesh(finalFaceBreps):
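#Re-assemble a single mesh from a list of individual quad/triangle face breps.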
finalMesh = rc.Geometry.Mesh()
for brepCt, brep in enumerate(finalFaceBreps):
brepVerts = brep.DuplicateVertices()
if len(brepVerts) == 4:
facePt1 = rc.Geometry.Point3d(brepVerts[0])
facePt2 = rc.Geometry.Point3d(brepVerts[1])
facePt3 = rc.Geometry.Point3d(brepVerts[2])
facePt4 = rc.Geometry.Point3d(brepVerts[3])
meshFacePts = [facePt1, facePt2, facePt3, facePt4]
mesh = rc.Geometry.Mesh()
for point in meshFacePts:
mesh.Vertices.Add(point)
mesh.Faces.AddFace(0, 1, 2, 3)
finalMesh.Append(mesh)
else:
facePt1 = rc.Geometry.Point3d(brepVerts[0])
facePt2 = rc.Geometry.Point3d(brepVerts[1])
facePt3 = rc.Geometry.Point3d(brepVerts[2])
meshFacePts = [facePt1, facePt2, facePt3]
mesh = rc.Geometry.Mesh()
for point in meshFacePts:
mesh.Vertices.Add(point)
mesh.Faces.AddFace(0, 1, 2)
finalMesh.Append(mesh)
return finalMesh
def prepareGeometry(gridSize, distFromFloor, removeInt, sectionMethod, sectionBreps, includeOutdoor, constantTransmis, addShdTransmiss, hb_zoneData):
#Separate the HBZoneData.
zoneBreps = hb_zoneData[0]
surfaceNames = hb_zoneData[1]
zoneSrfs = hb_zoneData[2]
zoneSrfTypes = hb_zoneData[4]
srfInteriorList = hb_zoneData[5]
zoneNames = hb_zoneData[6]
zoneNatVentArea = hb_zoneData[7]
zoneVolumes = hb_zoneData[8]
srfAirWallAdjList = hb_zoneData[9]
windowSrfTransmiss = hb_zoneData[11]
modelHasIntWindows = hb_zoneData[12]
srfInteriorWindowList = hb_zoneData[13]
srfIntWindowAdjNumList = hb_zoneData[14]
zoneFloorReflect = hb_zoneData[15]
#Make copies of the original zones in the event that some are combined as air walls are removed.
oldZoneBreps = zoneBreps[:]
oldZoneSrfs = zoneSrfs[:]
oldZoneSrfTypes = zoneSrfTypes[:]
oldSrfInteriorWindowList = srfInteriorWindowList[:]
#Set meshing parameters to be used throughout the function.
srfMeshPar = rc.Geometry.MeshingParameters.Coarse
#Create the lists that will be filled.
geoCheck = True
testPts = []
MRTMeshBreps = []
MRTMeshInit = []
zoneSrfsMesh = []
zoneWires = []
zoneOpaqueMesh = []
zoneWindowMesh = []
zoneWindowTransmiss = []
zoneWindowNames = []
zoneHasWindows = []
zoneWeights = []
zoneInletParams = []
zoneBrepsNonSolid = []
continuousDaylitVols = []
outdoorPtHeightWeights = []
#Make lists to keep track of all deleted faces to use if there are some parts of the connected surface that lie completely outside of the zone.
allDeletedFaces = []
deletedFaceBreps = []
deletedTestPts = []
if sectionMethod != 0 and includeOutdoor == True:
for sect in sectionBreps:
allDeletedFaces.append([])
deletedFaceBreps.append([])
deletedTestPts.append([])
#If there is additional shading, check to be sure that the number of faces in each brep is 1.
additionalShading = []
newAddShdTransmiss = []
if additionalShading_ != []:
for shdCount, shdBrep in enumerate(additionalShading_):
if shdBrep.Faces.Count == 1:
additionalShading.append(shdBrep)
if addShdTransmiss != []: newAddShdTransmiss.append(addShdTransmiss[shdCount])
else:
for face in shdBrep.Faces:
try:
additionalShading.append(rc.Geometry.BrepFace.ToBrep(face))
except:
if face.IsQuad:
additionalShading.append(rc.Geometry.Brep.CreateFromCornerPoints(rc.Geometry.Point3d(shdBrep.Vertices[face.A]), rc.Geometry.Point3d(shdBrep.Vertices[face.B]), rc.Geometry.Point3d(shdBrep.Vertices[face.C]), rc.Geometry.Point3d(shdBrep.Vertices[face.D]), sc.doc.ModelAbsoluteTolerance))
else:
additionalShading.append(rc.Geometry.Brep.CreateFromCornerPoints(rc.Geometry.Point3d(shdBrep.Vertices[face.A]), rc.Geometry.Point3d(shdBrep.Vertices[face.B]), rc.Geometry.Point3d(shdBrep.Vertices[face.C]), sc.doc.ModelAbsoluteTolerance))
if addShdTransmiss != []: newAddShdTransmiss.append(addShdTransmiss[shdCount])
addShdTransmiss = newAddShdTransmiss
#Write a function to split breps with the zone and pull out the correctly split surface.
def splitOffsetFloor(brep, zone):
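#Split the offset floor surface with the zone geometry and keep the piece whose centroid is closest to the zone's centroid.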
splitBrep = rc.Geometry.Brep.Split(brep, zone, tol)
distToCent = []
for element in splitBrep:
distToCent.append(rc.Geometry.Point3d.DistanceTo(rc.Geometry.AreaMassProperties.Compute(element).Centroid, rc.Geometry.AreaMassProperties.Compute(zone).Centroid))
try:
distToCent, splitBrep = zip(*sorted(zip(distToCent, splitBrep)))
finalBrep = splitBrep[0]
except:
finalBrep = brep
return finalBrep
#If interior walls have been removed, see which surfaces are adjacent and re-make the lists of zones.
if removeInt == True:
#Make a function to remove duplicates from a list.
def removeDup(seq):
seen = set()
seen_add = seen.add
return [ x for x in seq if not (x in seen or seen_add(x))]
#Make a function to tell if all items in a list are the same.
def allSame(items):
return all(x == items[0] for x in items)
#Make blank lists that will re-create the original lists.
newZoneBreps = []
newSurfaceNames = []
newZoneSrfs = []
newZoneSrfTypes = []
newZoneSolidSrfs = []
newWindowSrfTransmiss = []
newSrfIntWindowAdjNumList = []
newFlrRefList = []
newZoneFloorReflect = []
newZoneFloorSrfs = []
#Write a function that solves for the connections in a network of zones (needed to identify air wall and interior window networks).
def FindAdjNetwork(adjDataTree):
adjacentList = []
totalAdjList = []
for zoneCount, srfList in enumerate(zoneSrfs):
if allSame(adjDataTree[zoneCount]) == False:
for srfCount, srf in enumerate(srfList):
if adjDataTree[zoneCount][srfCount] != None:
if len(adjacentList) == 0:
adjacentList.append([zoneCount])
else:
#Find the adjacent zone.
adjSrf = adjDataTree[zoneCount][srfCount]
for zoneCount2, srfList in enumerate(surfaceNames):
for srfName in srfList:
if adjSrf == srfName: adjZone = zoneCount2
#Have a value to keep track of whether a match has been found for a zone.
matchFound = False
#Check the current adjacencies list to find out where to place the zone.
for zoneAdjListCount, zoneAdjList in enumerate(adjacentList):
#Maybe we already have both of the zones as adjacent.
if zoneCount in zoneAdjList and adjZone in zoneAdjList:
matchFound = True
#If we have the zone but not the adjacent zone, append it to the list.
elif zoneCount in zoneAdjList and adjZone not in zoneAdjList:
adjacentList[zoneAdjListCount].append(adjZone)
matchFound = True
#If we have the adjacent zone but not the zone itself, append it to the list.
elif zoneCount not in zoneAdjList and adjZone in zoneAdjList:
adjacentList[zoneAdjListCount].append(zoneCount)
matchFound = True
else: pass
#If no match was found, start a new list.
if matchFound == False:
adjacentList.append([zoneCount])
else:
#The zone is not adjacent to any other zones so we will put it in its own list.
adjacentList.append([zoneCount])
#Remove duplicates found in the process of looking for adjacencies.
fullAdjacentList = []
newAdjacentList = []
for listCount, zoneList in enumerate(adjacentList):
good2Go = True
listCheck = []
notAccountedForCheck = []
#Check if the zones are already accounted for
for zoneNum in zoneList:
if zoneNum in fullAdjacentList: listCheck.append(zoneNum)
else: notAccountedForCheck.append(zoneNum)
if len(listCheck) == len(zoneList):
#All zones in the list are already accounted for.
good2Go = False
if good2Go == True and len(listCheck) == 0:
#All of the zones in the list are not yet accounted for.
newAdjacentList.append(zoneList)
fullAdjacentList.extend(adjacentList[listCount])
elif good2Go == True:
#Find the existing zone list that contains the duplicates and append the non-duplicates to the list.
for val in listCheck:
for existingListCount, existingList in enumerate(newAdjacentList):
if val in existingList: thisIsTheList = existingListCount
newAdjacentList[thisIsTheList].extend(notAccountedForCheck)
fullAdjacentList.extend(notAccountedForCheck)
return newAdjacentList
#Calculate the air wall adjacencies.
adjacentList = FindAdjNetwork(srfInteriorList)
#If there are interior windows, use the AdjNetwork function to find the continuously daylit spaces.
finaldaylitAdjList = []
if modelHasIntWindows == True:
daylitAdjList = FindAdjNetwork(srfInteriorWindowList)
for airCount, airAdjList in enumerate(adjacentList):
finaldaylitAdjList.append([])
for windowList in daylitAdjList:
for airCount2, airAdjList2 in enumerate(adjacentList):
if airAdjList2[0] in windowList: finaldaylitAdjList[airCount].append(airCount2)
#Create a new "super zone" from the zones that are continuously connected by air walls.
for listCount, list in enumerate(adjacentList):
listMarker = len(newSurfaceNames)
newSurfaceNames.append([])
newZoneSrfs.append([])
newZoneSolidSrfs.append([])
newZoneSrfTypes.append([])
zoneBrepsNonSolid.append([])
newWindowSrfTransmiss.append([])
newSrfIntWindowAdjNumList.append([])
newFlrRefList.append([])
newZoneFloorReflect.append([])
for zoneCount in list:
for srfCount, surface in enumerate(zoneSrfs[zoneCount]):
if srfInteriorList[zoneCount][srfCount] == None and srfAirWallAdjList[zoneCount][srfCount] not in list:
newZoneSolidSrfs[listMarker].append(surface)
newZoneSrfs[listMarker].append(surface)
newSurfaceNames[listMarker].append(surfaceNames[zoneCount][srfCount])
newZoneSrfTypes[listMarker].append(zoneSrfTypes[zoneCount][srfCount])
newWindowSrfTransmiss[listMarker].append(windowSrfTransmiss[zoneCount][srfCount])
newSrfIntWindowAdjNumList[listMarker].append(srfIntWindowAdjNumList[zoneCount][srfCount])
newZoneFloorReflect[listMarker].append(zoneFloorReflect[zoneCount][srfCount])
elif srfInteriorList[zoneCount][srfCount] == None and srfAirWallAdjList[zoneCount][srfCount] in list:
newZoneSrfs[listMarker].append(surface)
newSurfaceNames[listMarker].append(surfaceNames[zoneCount][srfCount])
newZoneSrfTypes[listMarker].append(zoneSrfTypes[zoneCount][srfCount])
newWindowSrfTransmiss[listMarker].append(windowSrfTransmiss[zoneCount][srfCount])
newSrfIntWindowAdjNumList[listMarker].append(srfIntWindowAdjNumList[zoneCount][srfCount])
newZoneFloorReflect[listMarker].append(zoneFloorReflect[zoneCount][srfCount])
joinedBrep = rc.Geometry.Brep.JoinBreps(newZoneSolidSrfs[listMarker], tol)
zoneBrepsNonSolid[listCount].extend(joinedBrep)
newZoneBreps.append(joinedBrep[0])
#Remember to take the roofs and their reflectivity for possible outdoor solar radiation calculations.
newZoneFloorReflect.append(zoneFloorReflect[-1])
zoneBreps = newZoneBreps
surfaceNames = newSurfaceNames
zoneSrfs = newZoneSrfs
zoneSrfTypes = newZoneSrfTypes
windowSrfTransmiss = newWindowSrfTransmiss
srfIntWindowAdjNumList = newSrfIntWindowAdjNumList
zoneFloorReflect = newZoneFloorReflect
else:
for brep in zoneBreps:
zoneBrepsNonSolid.append([brep])
#Make sure that the zone volumes are closed.
for brepCount, brep in enumerate(zoneBreps):
if zoneBrepsNonSolid[brepCount][0].IsSolid: pass
else:
edgeCrv = rc.Geometry.Brep.DuplicateEdgeCurves(brep, True)
buggyEdge = False
for crv in edgeCrv:
if crv.SpanCount == 1:
buggyEdge = True
if buggyEdge == False:
geoCheck = False
warning = "Getting rid of interior walls has caused the connected zone geometry to not be closed. Make sure that you do not have an airwall bounding the outdoors and, if not, make sure that all zones of your building are connected here."
print warning
ghenv.Component.AddRuntimeMessage(w, warning)
else:
geoCheck = False
warning = "One of your continuous closed air volumes has an overlapping edge that is causing it to not read as a solid. \n Bake the closedAirVolumes output and do a DupBorder command on the polysurface to see the buggy edge. \n Rhino's solid operations are buggy. Hopefully McNeel fix this one soon."
print warning
ghenv.Component.AddRuntimeMessage(w, warning)
finalSrfTypes = zoneSrfTypes[:]
if geoCheck == True:
#Check the section method and use this to decide whether to mesh the test surfaces now.
if sectionMethod == 1:
allTestPts = []
allFaceBreps = []
finalBreps = [sectionBreps]
for brep in finalBreps:
finalMesh = createMesh(brep, gridSize)
for meshCount, mesh in enumerate(finalMesh):
allTestPts.append([])
allFaceBreps.append([])
for faceCount, face in enumerate(mesh.Faces):
if face.IsQuad:
faceBrep = rc.Geometry.Brep.CreateFromCornerPoints(rc.Geometry.Point3d(mesh.Vertices[face.A]), rc.Geometry.Point3d(mesh.Vertices[face.B]), rc.Geometry.Point3d(mesh.Vertices[face.C]), rc.Geometry.Point3d(mesh.Vertices[face.D]), sc.doc.ModelAbsoluteTolerance)
if face.IsTriangle:
faceBrep = rc.Geometry.Brep.CreateFromCornerPoints(rc.Geometry.Point3d(mesh.Vertices[face.A]), rc.Geometry.Point3d(mesh.Vertices[face.B]), rc.Geometry.Point3d(mesh.Vertices[face.C]), sc.doc.ModelAbsoluteTolerance)
try:
centPt = rc.Geometry.AreaMassProperties.Compute(faceBrep).Centroid
allTestPts[meshCount].append(centPt)
allFaceBreps[meshCount].append(faceBrep)
except:
pass
elif sectionMethod == 2:
pointMesh, pointBreps, sectionBreps = createMeshFromPoints(sectionBreps[0], gridSize)
finalMesh = [pointMesh]
allTestPts = [sectionBreps]
allFaceBreps = [pointBreps]
for zoneCount, srfList in enumerate(zoneSrfs):
#Extract the wireframe.
wireFrame = zoneBreps[zoneCount].DuplicateEdgeCurves()
for crv in wireFrame:
zoneWires.append(crv)
#Add lists to the final lists.
testPts.append([])
MRTMeshBreps.append([])
MRTMeshInit.append([])
zoneSrfsMesh.append([])
if sectionMethod == 0:
#Select out just the floor geometry.
floorBreps = []
for srfCount, srf in enumerate(srfList):
zoneSrfsMesh[zoneCount].append(rc.Geometry.Mesh.CreateFromBrep(srf, srfMeshPar)[0])
if zoneSrfTypes[zoneCount][srfCount] == 2 or zoneSrfTypes[zoneCount][srfCount] == 2.25 or zoneSrfTypes[zoneCount][srfCount] == 2.5 or zoneSrfTypes[zoneCount][srfCount] == 2.75:
floorBreps.append(srf)
#If there are multiple floor breps, join them together.
dataCheck = True
if len(floorBreps) > 0: floorBrep = floorBreps
else:
dataCheck = False
floorBrep = []
#Move the surface upwards by the offsetDist and keep track of planarity.
floorBrepsMoved = []
translation = rc.Geometry.Transform.Translation(0,0,distFromFloor)
planarList = []
if len(floorBrep) == 1: isPlanar = True
else: isPlanar = False
for brep in floorBrep:
for face in brep.Faces:
if not face.IsPlanar: isPlanar = False
planarList.append(isPlanar)
newBrep = brep.Duplicate()
newBrep.Transform(translation)
floorBrepsMoved.append(newBrep)
finalBreps = []
for count, brep in enumerate(floorBrepsMoved):
#If the surface is planar, intersect the surface with the walls of the zone and rebuild the surface from the curve.
if planarList[count] == True:
intersectLineList = rc.Geometry.Intersect.Intersection.BrepBrep(brep, zoneBreps[zoneCount], tol)[1]
try: intersectLineList = rc.Geometry.Curve.JoinCurves(intersectLineList, tol)
except: pass
if len(intersectLineList) == 1:
if intersectLineList[0].IsClosed:
finalBrep = rc.Geometry.Brep.CreatePlanarBreps(intersectLineList[0])
finalBreps.append(finalBrep)
else:
finalBrepInit = splitOffsetFloor(brep, zoneBreps[zoneCount])
edges = finalBrepInit.DuplicateEdgeCurves()
joinedEdges = rc.Geometry.Curve.JoinCurves(edges, tol)
finalBrep = rc.Geometry.Brep.CreatePlanarBreps(joinedEdges[0])
finalBreps.append(finalBrep)
elif len(intersectLineList) > 0:
finalBrepInit = splitOffsetFloor(brep, zoneBreps[zoneCount])
edges = finalBrepInit.DuplicateEdgeCurves()
joinedEdges = rc.Geometry.Curve.JoinCurves(edges, tol)
finalBrep = rc.Geometry.Brep.CreatePlanarBreps(joinedEdges[0])
finalBreps.append(finalBrep)
else:
#The intersection failed. Just take the floors as they are.
finalBreps.append([brep])
else:
#If the surface is curved or has multiple elements, try to trim it with the closed zone brep.
try:
finalBrep = splitOffsetFloor(brep, zoneBreps[zoneCount])
finalBreps.append([finalBrep])
except:
finalBreps.append([brep])
else:
for srfCount, srf in enumerate(srfList):
zoneSrfsMesh[zoneCount].append(rc.Geometry.Mesh.CreateFromBrep(srf, srfMeshPar)[0])
#Generate the meshes and test points of the final surface.
if sectionMethod == 0:
for brep in finalBreps:
finalMesh = createMesh(brep, gridSize)
for meshCount, mesh in enumerate(finalMesh):
finalTestPts = []
finalFaceBreps = []
deleteIndices = []
deleteTestPts = []
deleteFaceBreps = []
for faceCount, face in enumerate(mesh.Faces):
if face.IsQuad:
faceBrep = rc.Geometry.Brep.CreateFromCornerPoints(rc.Geometry.Point3d(mesh.Vertices[face.A]), rc.Geometry.Point3d(mesh.Vertices[face.B]), rc.Geometry.Point3d(mesh.Vertices[face.C]), rc.Geometry.Point3d(mesh.Vertices[face.D]), sc.doc.ModelAbsoluteTolerance)
if face.IsTriangle:
faceBrep = rc.Geometry.Brep.CreateFromCornerPoints(rc.Geometry.Point3d(mesh.Vertices[face.A]), rc.Geometry.Point3d(mesh.Vertices[face.B]), rc.Geometry.Point3d(mesh.Vertices[face.C]), sc.doc.ModelAbsoluteTolerance)
try:
centPt = rc.Geometry.AreaMassProperties.Compute(faceBrep).Centroid
#Do a final check to be sure that the test point does not lie outside the zone and, if so, delete the mesh face, and don't append the point.
if zoneBreps[zoneCount].IsPointInside(centPt, tol, False) == False:
deleteIndices.append(faceCount)
deleteFaceBreps.append(faceBrep)
deleteTestPts.append(centPt)
else:
finalFaceBreps.append(faceBrep)
finalTestPts.append(centPt)
except:
pass
#Construct a new mesh from the breps that are inside each zone.
finalMesh = constructNewMesh(finalFaceBreps)
if len(finalTestPts) > 0:
if len(MRTMeshInit[zoneCount]) > 0: MRTMeshInit[zoneCount][0].Append(finalMesh)
else: MRTMeshInit[zoneCount].append(finalMesh)
MRTMeshBreps[zoneCount].extend(finalFaceBreps)
testPts[zoneCount].extend(finalTestPts)
else:
for meshCount, allPtList in enumerate(allTestPts):
finalTestPts = []
finalFaceBreps = []
deleteIndices = []
deleteTestPts = []
deleteFaceBreps = []
for ptCount, meshPoint in enumerate(allPtList):
#Do a final check to be sure that the test point does not lie outside the zone and, if so, delete the mesh face, and don't append the point.
if zoneBreps[zoneCount].IsPointInside(meshPoint, tol, False) == False:
deleteIndices.append(ptCount)
deleteFaceBreps.append(allFaceBreps[meshCount][ptCount])
deleteTestPts.append(meshPoint)
else:
finalFaceBreps.append(allFaceBreps[meshCount][ptCount])
finalTestPts.append(meshPoint)
#Append the deleted faces to the list.
if includeOutdoor == True:
allDeletedFaces[meshCount].append(deleteIndices)
deletedFaceBreps[meshCount].append(deleteFaceBreps)
deletedTestPts[meshCount].append(deleteTestPts)
#Construct a new mesh from the breps that are inside each zone.
finalMesh = constructNewMesh(finalFaceBreps)
if len(finalTestPts) > 0:
MRTMeshInit[zoneCount].append(finalMesh)
MRTMeshBreps[zoneCount].extend(finalFaceBreps)
testPts[zoneCount].extend(finalTestPts)
#If the user has selected to use the results for an outdoor calculation, pull out those parts of the mesh related to the outdoors using the deletedIndices list.
if sectionMethod != 0 and includeOutdoor == True:
outdoorTestPts = []
outdoorFaceBreps = []
for testSrfCount, testSrf in enumerate(allDeletedFaces):
baseDelIndices = testSrf[0]
totalTests = len(testSrf)
indexCount = []
for indCt, index in enumerate(baseDelIndices):
indexCount.append([])
for othDelIndices in testSrf:
if index in othDelIndices: indexCount[indCt].append(1)
if sum(indexCount[indCt]) == totalTests:
outdoorTestPts.append(deletedTestPts[testSrfCount][0][indCt])
outdoorFaceBreps.append(deletedFaceBreps[testSrfCount][0][indCt])
#Construct a new mesh from the breps that are inside each zone.
outdoorMesh = constructNewMesh(outdoorFaceBreps)
#Append outdoor meshes to the complete list.
if len(outdoorTestPts) > 0:
MRTMeshInit.append([outdoorMesh])
MRTMeshBreps.append(outdoorFaceBreps)
testPts.append(outdoorTestPts)
else:
includeOutdoor = False
#Make a list of all surfaces for the viewFactor calculation of the outdoor mesh.
zoneSrfsMeshOutdoor = []
surfaceNamesOutdoor = []
surfaceTypesOutdoor = []
zoneOpaqueMeshOutdoor = []
zoneTranspMeshOutdoor = []
zoneTranspMeshTransmiss = []
zoneTranspMeshSrfName = []
for zoneSrfListCount, zoneSrfList in enumerate(zoneSrfs):
for srfName in surfaceNames[zoneSrfListCount]:
surfaceNamesOutdoor.append(srfName)
for srfType in finalSrfTypes[zoneSrfListCount]:
surfaceTypesOutdoor.append(srfType)
for srfCount, srf in enumerate(zoneSrfList):
srfMesh = rc.Geometry.Mesh.CreateFromBrep(srf, srfMeshPar)[0]
zoneSrfsMeshOutdoor.append(srfMesh)
if finalSrfTypes[zoneSrfListCount][srfCount] != 5:
zoneOpaqueMeshOutdoor.append(srfMesh)
else:
zoneTranspMeshOutdoor.append(srfMesh)
zoneTranspMeshTransmiss.append(windowSrfTransmiss[zoneSrfListCount][srfCount])
zoneTranspMeshSrfName.append(surfaceNames[zoneSrfListCount][srfCount])
zoneSrfsMesh.append(zoneSrfsMeshOutdoor)
surfaceNames.append(surfaceNamesOutdoor)
finalSrfTypes.append(surfaceTypesOutdoor)
#Make a list for the weighting of each zone value for the air temperature calculation.
zoneWeights = []
heightWeights = []
for falseZoneCount, falseZone in enumerate(testPts):
if sectionMethod != 0 and includeOutdoor == True:
if falseZoneCount != len(testPts)-1:
zoneWeights.append([])
heightWeights.append([])
for point in falseZone:
zoneWeights[falseZoneCount].append([])
else:
zoneWeights.append([])
heightWeights.append([])
for point in falseZone:
zoneWeights[falseZoneCount].append([])
if removeInt == True:
#Get the centroids of each zone, which will represent the air node of the zone.
zoneCentroids = []
for oirignalZone in oldZoneBreps:
centPt = rc.Geometry.VolumeMassProperties.Compute(oirignalZone).Centroid
zoneCentroids.append(centPt)
#For each of the test points, weight them based on the zone they belong to.
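#Weights are inverse-square distances to the centroids (air nodes) of the zones joined by air walls, normalized below so that they sum to 1.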
for falseZoneCount, falseZone in enumerate(testPts):
if sectionMethod != 0 and includeOutdoor == True:
if falseZoneCount != len(testPts)-1:
for pointCount, point in enumerate(falseZone):
initPointWeights = []
for orignalZoneCount, oirignalZoneCent in enumerate(zoneCentroids):
if orignalZoneCount in adjacentList[falseZoneCount]:
ptDistance = rc.Geometry.Point3d.DistanceTo(point, oirignalZoneCent)
ptWeight = 1/(ptDistance*ptDistance)
initPointWeights.append(ptWeight)
else:
initPointWeights.append(0)
for weight in initPointWeights:
zoneWeights[falseZoneCount][pointCount].append(weight/sum(initPointWeights))
else:
for pointCount, point in enumerate(falseZone):
initPointWeights = []
for orignalZoneCount, oirignalZoneCent in enumerate(zoneCentroids):
if orignalZoneCount in adjacentList[falseZoneCount]:
ptDistance = rc.Geometry.Point3d.DistanceTo(point, oirignalZoneCent)
ptWeight = 1/(ptDistance*ptDistance)
initPointWeights.append(ptWeight)
else:
initPointWeights.append(0)
for weight in initPointWeights:
zoneWeights[falseZoneCount][pointCount].append(weight/sum(initPointWeights))
else:
#For each of the test points, give them a weight totalling to 1 based on which zone they belong to.
for falseZoneCount, falseZone in enumerate(testPts):
if sectionMethod != 0 and includeOutdoor == True:
if falseZoneCount != len(testPts)-1:
for pointCount, point in enumerate(falseZone):
for orignalZoneCount, oirignalZone in enumerate(oldZoneBreps):
if oirignalZone.IsPointInside(point, tol, False) == True: zoneWeights[falseZoneCount][pointCount].append(1)
else: zoneWeights[falseZoneCount][pointCount].append(0)
else:
for pointCount, point in enumerate(falseZone):
for orignalZoneCount, oirignalZone in enumerate(oldZoneBreps):
if oirignalZone.IsPointInside(point, tol, False) == True: zoneWeights[falseZoneCount][pointCount].append(1)
else: zoneWeights[falseZoneCount][pointCount].append(0)
#Calculate height weights for each of the points.
for falseZoneCount, falseZone in enumerate(testPts):
if sectionMethod != 0 and includeOutdoor == True:
if falseZoneCount != len(testPts)-1:
zoneBB = rc.Geometry.Brep.GetBoundingBox(zoneBreps[falseZoneCount], rc.Geometry.Plane.WorldXY)
maxZ = zoneBB.Max.Z
minZ = zoneBB.Min.Z
difference = maxZ-minZ
for pointCount, point in enumerate(falseZone):
heightWeight = (point.Z-minZ)/difference
heightWeights[falseZoneCount].append(heightWeight)
else:
zoneBB = rc.Geometry.Brep.GetBoundingBox(zoneBreps[falseZoneCount], rc.Geometry.Plane.WorldXY)
maxZ = zoneBB.Max.Z
minZ = zoneBB.Min.Z
difference = maxZ-minZ
for pointCount, point in enumerate(falseZone):
heightWeight = (point.Z-minZ)/difference
heightWeights[falseZoneCount].append(heightWeight)
#Calculate the heights of the original zones and the average heights of the windows (to be used in the stratification calculation).
for orignalZoneCount, oirignalZone in enumerate(oldZoneBreps):
zoneInletParams.append([])
#Calculate the total height of the zone.
zoneBB = rc.Geometry.Brep.GetBoundingBox(oirignalZone, rc.Geometry.Plane.WorldXY)
zoneInletParams[orignalZoneCount].append(zoneBB.Min.Z)
zoneInletParams[orignalZoneCount].append(zoneBB.Max.Z)
#Calculate the heights from the floor to the window mid-plane (do this only for the windows along the walls and for exterior windows).
zoneGlzMesh = rc.Geometry.Mesh()
glzTracker = 0
for srfCt, srf in enumerate(oldZoneSrfs[orignalZoneCount]):
if oldZoneSrfTypes[orignalZoneCount][srfCt] == 5 and oldZoneSrfTypes[orignalZoneCount][srfCt-1] == 0 and oldSrfInteriorWindowList[orignalZoneCount][srfCt] == None:
zoneGlzMesh.Append(rc.Geometry.Mesh.CreateFromBrep(srf, srfMeshPar)[0])
glzTracker += 1
if glzTracker != 0:
glzBB = rc.Geometry.Brep.GetBoundingBox(zoneGlzMesh, rc.Geometry.Plane.WorldXY)
glzMinHeight = glzBB.Min.Z
glzCentPt = rc.Geometry.AreaMassProperties.Compute(zoneGlzMesh).Centroid
glzMidHeight = glzCentPt.Z
zoneInletParams[orignalZoneCount].append((glzMidHeight + glzMinHeight)/2) #Take the average height of the lower half of the glazing.
else: zoneInletParams[orignalZoneCount].append(None)
#Get the volume of each zone.
zoneInletParams[orignalZoneCount].append(zoneVolumes[orignalZoneCount])
#Get the area of operable glazing.
opGlzArea = 0
for val in zoneNatVentArea[orignalZoneCount]:
if val != None: opGlzArea += float(val)
zoneInletParams[orignalZoneCount].append(opGlzArea)
# Pull out the geometry that can block sun vectors for the solar adjusted MRT calculation.
for zCount, zoneSrfTList in enumerate(zoneSrfTypes):
zoneOpaqueMesh.append([])
zoneWindowMesh.append([])
zoneWindowTransmiss.append([])
zoneWindowNames.append([])
#First add in any additional shading to the list.
hasWindows = 0
if additionalShading != [] and addShdTransmiss == []:
for item in additionalShading:
opaqueMesh = rc.Geometry.Mesh.CreateFromBrep(item, rc.Geometry.MeshingParameters.Coarse)[0]
zoneOpaqueMesh[zCount].append(opaqueMesh)
elif additionalShading != []:
hasWindows = 1
for itemCount, item in enumerate(additionalShading):
transpMesh = rc.Geometry.Mesh.CreateFromBrep(item, rc.Geometry.MeshingParameters.Coarse)[0]
zoneWindowMesh[zCount].append(transpMesh)
zoneWindowTransmiss[zCount].append(addShdTransmiss[itemCount])
zoneWindowNames[zCount].append('AddShd' + str(itemCount))
#Now, pull out all of the zones opaque and transparent geometry.
for sCount, srfT in enumerate(zoneSrfTList):
if srfT != 5:
opaqueMesh = rc.Geometry.Mesh.CreateFromBrep(zoneSrfs[zCount][sCount], rc.Geometry.MeshingParameters.Coarse)[0]
zoneOpaqueMesh[zCount].append(opaqueMesh)
else:
hasWindows = 1
windowMesh = rc.Geometry.Mesh.CreateFromBrep(zoneSrfs[zCount][sCount], rc.Geometry.MeshingParameters.Coarse)[0]
zoneWindowMesh[zCount].append(windowMesh)
zoneWindowTransmiss[zCount].append(windowSrfTransmiss[zCount][sCount])
zoneWindowNames[zCount].append(surfaceNames[zCount][sCount])
zoneHasWindows.append(hasWindows)
#If there are interior windows, be sure to add the geometry of the other continuously lit zones to the opaque and window geometry lists.
if modelHasIntWindows == True:
for contLightCount, contLightZone in enumerate(finaldaylitAdjList[zCount]):
intWindowCentroids = []
if contLightZone != zCount:
for sCount2, srfT2 in enumerate(zoneSrfTypes[contLightZone]):
if srfT2 != 5:
opaqueMesh = rc.Geometry.Mesh.CreateFromBrep(zoneSrfs[contLightZone][sCount2], rc.Geometry.MeshingParameters.Coarse)[0]
zoneOpaqueMesh[zCount].append(opaqueMesh)
elif srfIntWindowAdjNumList[contLightZone][sCount2] != None:
#Check to see if the interior window is already in the list before deciding whether to add it.
alreadyThere = False
srfCentroid = rc.Geometry.AreaMassProperties.Compute(zoneSrfs[contLightZone][sCount2]).Centroid
for centroid in intWindowCentroids:
if centroid.X < srfCentroid.X+tol and centroid.X > srfCentroid.X-tol and centroid.Y < srfCentroid.Y+tol and centroid.Y > srfCentroid.Y-tol and centroid.Z < srfCentroid.Z+tol and centroid.Z > srfCentroid.Z-tol: pass
else:
windowMesh = rc.Geometry.Mesh.CreateFromBrep(zoneSrfs[contLightZone][sCount2], rc.Geometry.MeshingParameters.Coarse)[0]
zoneWindowMesh[zCount].append(windowMesh)
zoneWindowTransmiss[zCount].append(windowSrfTransmiss[contLightZone][sCount2])
intWindowCentroids.append(srfCentroid)
zoneWindowNames[zCount].append(surfaceNames[contLightZone][sCount2])
else:
windowMesh = rc.Geometry.Mesh.CreateFromBrep(zoneSrfs[contLightZone][sCount2], rc.Geometry.MeshingParameters.Coarse)[0]
zoneWindowMesh[zCount].append(windowMesh)
zoneWindowTransmiss[zCount].append(windowSrfTransmiss[contLightZone][sCount2])
intWindowCentroids.append(rc.Geometry.AreaMassProperties.Compute(zoneSrfs[contLightZone][sCount2]).Centroid)
zoneWindowNames[zCount].append(surfaceNames[contLightZone][sCount2])
else:
for sCount2, srfT2 in enumerate(zoneSrfTypes[contLightZone]):
if srfT2 == 5 and srfIntWindowAdjNumList[contLightZone][sCount2] != None: intWindowCentroids.append(rc.Geometry.AreaMassProperties.Compute(zoneSrfs[contLightZone][sCount2]).Centroid)
#If there are outdoor points included in the calculation, add them to the zoneOpaqueMesh.
if sectionMethod != 0 and includeOutdoor == True:
outdoorHasWindows = 2
if additionalShading != [] and addShdTransmiss == []:
for item in additionalShading:
opaqueMesh = rc.Geometry.Mesh.CreateFromBrep(item, rc.Geometry.MeshingParameters.Coarse)[0]
zoneOpaqueMeshOutdoor.append(opaqueMesh)
elif additionalShading != []:
outdoorHasWindows = 1
for itemCount, item in enumerate(additionalShading):
transpMesh = rc.Geometry.Mesh.CreateFromBrep(item, rc.Geometry.MeshingParameters.Coarse)[0]
zoneTranspMeshOutdoor.append(transpMesh)
if constantTransmis == True: zoneTranspMeshTransmiss.append(addShdTransmiss[itemCount])
else: zoneTranspMeshTransmiss.append(1)
zoneTranspMeshSrfName.append('AddShd' + str(itemCount))
zoneOpaqueMesh.append(zoneOpaqueMeshOutdoor)
zoneHasWindows.append(outdoorHasWindows)
zoneWindowMesh.append(zoneTranspMeshOutdoor)
zoneWindowTransmiss.append(zoneTranspMeshTransmiss)
zoneWindowNames.append(zoneTranspMeshSrfName)
#Get the absolute heights of the outdoor points in order to factor them in correctly in the wind speed calculation.
for point in testPts[-1]:
if point.Z >= 0: outdoorPtHeightWeights.append(point.Z)
else: outdoorPtHeightWeights.append(0)
#Add the additional shading to the wireframe.
if additionalShading != []:
for item in additionalShading:
wireFrame = item.DuplicateEdgeCurves()
for crv in wireFrame:
zoneWires.append(crv)
return geoCheck, testPts, MRTMeshBreps, MRTMeshInit, zoneWires, zoneSrfsMesh, surfaceNames, zoneOpaqueMesh, zoneNames, zoneWeights, heightWeights, zoneInletParams, zoneHasWindows, zoneBrepsNonSolid, includeOutdoor, zoneWindowMesh, zoneWindowTransmiss, outdoorPtHeightWeights, zoneWindowNames, zoneFloorReflect, finalSrfTypes, addShdTransmiss
else:
return geoCheck, testPts, MRTMeshBreps, MRTMeshInit, zoneWires, zoneSrfsMesh, surfaceNames, zoneOpaqueMesh, zoneNames, zoneWeights, [], zoneInletParams, zoneHasWindows, zoneBrepsNonSolid, includeOutdoor, zoneWindowMesh, zoneWindowTransmiss, outdoorPtHeightWeights, zoneWindowNames, zoneFloorReflect, zoneSrfTypes, addShdTransmiss
def checkViewResolution(viewResolution, lb_preparation):
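#Generate view vectors from the centroids of the sky patches at the given resolution; reversed copies of each vector cover the lower hemisphere, and every vector carries a weight based on the area of the patch it came from.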
newVecs = []
skyViewVecs = []
newVecsAreas = []
skyViewVecsAreas = []
skyPatches = lb_preparation.generateSkyGeo(rc.Geometry.Point3d.Origin, viewResolution, 1)
normPatchArea = 6.28318530718/len(skyPatches)
for patch in skyPatches:
patchAreaProps = rc.Geometry.AreaMassProperties.Compute(patch)
patchPt = patchAreaProps.Centroid
patchAreaNorm = (patchAreaProps.Area*sc.sticky["honeybee_ConversionFactor"]*sc.sticky["honeybee_ConversionFactor"])/normPatchArea
Vec = rc.Geometry.Vector3d(patchPt.X, patchPt.Y, patchPt.Z)
revVec = rc.Geometry.Vector3d(-patchPt.X, -patchPt.Y, -patchPt.Z)
skyViewVecs.append(Vec)
skyViewVecsAreas.append(patchAreaNorm)
newVecs.append(Vec)
newVecs.append(revVec)
newVecsAreas.extend([patchAreaNorm, patchAreaNorm])
return newVecs, skyViewVecs, newVecsAreas, skyViewVecsAreas
def allSame(items):
return all(x == items[0] for x in items)
def parallel_projection(zoneSrfsMesh, viewVectors, pointList):
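#Compute surface view factors from each point in parallel: cast one ray per view vector, credit the surface with the closest mesh intersection, and take each surface's share of the total rays as its view factor.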
#Placeholder for the outcome of the parallel projection.
pointIntList = []
for point in pointList: pointIntList.append([])
#Keep track of the divisor.
divisor = len(viewVectors)
def intersect(i):
point = pointList[i]
#Create the rays to be projected from each point.
pointRays = []
for vec in viewVectors: pointRays.append(rc.Geometry.Ray3d(point, vec))
#Create a list that will hold the intersection hits of each surface
srfHits = []
for srf in zoneSrfsMesh: srfHits.append([])
#Perform the intersection of the rays with the mesh.
pointIntersectList = []
for rayCount, ray in enumerate(pointRays):
pointIntersectList.append([])
for srf in zoneSrfsMesh:
intersect = rc.Geometry.Intersect.Intersection.MeshRay(srf, ray)
if intersect == -1: intersect = "N"
pointIntersectList[rayCount].append(intersect)
#Find the intersection that was the closest for each ray.
for list in pointIntersectList:
if allSame(list) == False:
minIndex, minValue = min(enumerate(list), key=operator.itemgetter(1))
srfHits[minIndex].append(1)
#Sum up the lists and divide by the total rays to get the view factor.
for hitList in srfHits:
pointIntList[i].append(sum(hitList)/divisor)
tasks.Parallel.ForEach(range(len(pointList)), intersect)
return pointIntList
def parallel_skyProjection(zoneOpaqueMesh, skyViewVecs, skyViewVecsAreas, pointList, zoneWindowMesh, zoneWindowTransmiss, zoneHasWindows, zoneWindowNames):
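#Same parallel ray-casting as above but only toward the sky vectors; rays blocked by opaque meshes count as 0 while unblocked rays are scaled by the transmissivity of any windows they pass through.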
#Placeholder for the outcome of the parallel projection.
pointIntList = []
skyBlockedList = []
skyBlockWindowNameCount = []
for num in range(len(pointList)):
pointIntList.append(0.0)
skyBlockedList.append([])
skyBlockWindowNameCount.append([])
#Keep track of the divisor.
divisor = len(skyViewVecs)
def intersect(i):
point = pointList[i]
#Create the rays to be projected from each point.
pointRays = []
for vec in skyViewVecs: pointRays.append(rc.Geometry.Ray3d(point, vec))
#Perform the intersection of the rays with the opaque mesh.
pointIntersectList = []
for rayCount, ray in enumerate(pointRays):
pointIntersectList.append([])
for srf in zoneOpaqueMesh:
intersect = rc.Geometry.Intersect.Intersection.MeshRay(srf, ray)
if intersect == -1: pass
else: pointIntersectList[rayCount].append(1)
#See if the ray passed all of the context meshes.
finalIntersectList = []
for ray in pointRays:
finalIntersectList.append([])
for listCt, rayList in enumerate(pointIntersectList):
for intersect in rayList:
finalIntersectList[listCt].append(intersect)
finalViewCount = []
finalWindowNameCount = []
for rayListCount, rayList in enumerate(finalIntersectList):
if sum(rayList) == 0:
if zoneHasWindows == 2:
finalViewCount.append(1)
finalWindowNameCount.append(0)
else:
transmiss = 1
winNameList = []
for winCount, winMesh in enumerate(zoneWindowMesh):
intersect = rc.Geometry.Intersect.Intersection.MeshRay(winMesh, pointRays[rayListCount])
if intersect == -1: pass
else:
transmiss = transmiss * zoneWindowTransmiss[winCount]
winNameList.append(zoneWindowNames[winCount].upper())
finalViewCount.append(transmiss)
finalWindowNameCount.append(winNameList)
else:
finalViewCount.append(0)
finalWindowNameCount.append(0)
#Sum up the lists and divide by the total rays to get the view factor.
skyBlockedList[i] = finalViewCount
skyBlockWindowNameCount[i] = finalWindowNameCount
pointIntList[i] = sum(finalViewCount)/divisor
tasks.Parallel.ForEach(range(len(pointList)), intersect)
return pointIntList, skyBlockedList, skyBlockWindowNameCount
def checkOutdoorViewFac(outdoorTestPtViewFactor, testPtSkyView):
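#Any part of an outdoor point's view sphere not covered by building surfaces or the sky dome (the sky view accounts for half of the sphere) is treated as ground/horizon view.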
outdoorNonSrfViewFac = []
for ptCount, viewFac in enumerate(outdoorTestPtViewFactor):
outdoorNonSrfViewFac.append(1-sum(viewFac)-(testPtSkyView[ptCount]/2))
return outdoorNonSrfViewFac
def skyViewCalc(testPts, zoneOpaqueMesh, skyViewVecs, skyViewVecsAreas, zoneHasWindows, zoneWindowMesh, zoneWindowTransmiss, zoneWindowNames):
testPtSkyView = []
testPtSkyBlockedList = []
testPtBlockName = []
for zoneCount, pointList in enumerate(testPts):
if zoneHasWindows[zoneCount] > 0:
if parallel_ == True or parallel_ == None:
skyViewFactors, skyBlockedList, finalWindowNameCount = parallel_skyProjection(zoneOpaqueMesh[zoneCount], skyViewVecs, skyViewVecsAreas, testPts[zoneCount], zoneWindowMesh[zoneCount], zoneWindowTransmiss[zoneCount], zoneHasWindows[zoneCount], zoneWindowNames[zoneCount])
testPtSkyView.append(skyViewFactors)
testPtSkyBlockedList.append(skyBlockedList)
testPtBlockName.append(finalWindowNameCount)
else:
testPtSkyView.append([])
testPtSkyBlockedList.append([])
testPtBlockName.append([])
for pointCount, point in enumerate(pointList):
#Make the list that will eventually hold the view factors of each surface.
divisor = len(skyViewVecs)
#Create the rays to be projected from each point.
pointRays = []
for vec in skyViewVecs: pointRays.append(rc.Geometry.Ray3d(point, vec))
#Perform the intersection of the rays with the opaque mesh.
pointIntersectList = []
for rayCount, ray in enumerate(pointRays):
pointIntersectList.append([])
for srf in zoneOpaqueMesh[zoneCount]:
intersect = rc.Geometry.Intersect.Intersection.MeshRay(srf, ray)
if intersect == -1: pointIntersectList[rayCount].append(0)
else: pointIntersectList[rayCount].append(1)
#See if the ray passed all of the context meshes.
finalIntersectList = []
for ray in pointRays:
finalIntersectList.append([])
for listCt, rayList in enumerate(pointIntersectList):
for intersect in rayList:
finalIntersectList[listCt].append(intersect)
finalViewCount = []
finalWindowNameCount = []
for rayListCount, rayList in enumerate(finalIntersectList):
if sum(rayList) == 0:
if zoneHasWindows[zoneCount] == 2:
finalViewCount.append(1) #This is the code to indicate that the point is outside and there is no need to calculate a window transmissivity.
finalWindowNameCount.append(0)
else:
#The ray is not blocked but it is hitting a window and so we need to factor in the window transmissivity.
transmiss = 1
winNameList = []
for winCount, winMesh in enumerate(zoneWindowMesh[zoneCount]):
intersect = rc.Geometry.Intersect.Intersection.MeshRay(winMesh, pointRays[rayListCount])
if intersect == -1: pass
else:
transmiss = transmiss * zoneWindowTransmiss[zoneCount][winCount]
winNameList.append(zoneWindowNames[zoneCount][winCount].upper())
finalViewCount.append(transmiss)
finalWindowNameCount.append(winNameList)
else:
#The ray has been blocked by an opaque surface.
finalViewCount.append(0)
finalWindowNameCount.append(0)
#Sum up the lists and divide by the total rays to get the view factor.
testPtSkyBlockedList[zoneCount].append(finalViewCount)
testPtSkyView[zoneCount].append(sum(finalViewCount)/divisor)
testPtBlockName[zoneCount].append(finalWindowNameCount)
else:
testPtSkyView.append(0)
testPtSkyBlockedList.append([range(len(skyViewVecs))])
testPtBlockName.append([range(len(skyViewVecs))])
return testPtSkyView, testPtSkyBlockedList, testPtBlockName
def main(testPts, zoneSrfsMesh, viewVectors, includeOutdoor):
testPtViewFactor = []
for zoneCount, pointList in enumerate(testPts):
if parallel_ == True or parallel_ == None:
viewFactors = parallel_projection(zoneSrfsMesh[zoneCount], viewVectors, testPts[zoneCount])
testPtViewFactor.append(viewFactors)
else:
testPtViewFactor.append([])
for pointCount, point in enumerate(pointList):
#Make the list that will eventually hold the view factors of each surface.
testPtViewFactor[zoneCount].append([])
divisor = len(viewVectors)
#Create the rays to be projected from each point.
pointRays = []
for vec in viewVectors: pointRays.append(rc.Geometry.Ray3d(point, vec))
#Create a list that will hold the intersection hits of each surface
srfHits = []
for srf in zoneSrfsMesh[zoneCount]: srfHits.append([])
#Perform the intersection of the rays with the mesh.
pointIntersectList = []
for rayCount, ray in enumerate(pointRays):
pointIntersectList.append([])
for srf in zoneSrfsMesh[zoneCount]:
intersect = rc.Geometry.Intersect.Intersection.MeshRay(srf, ray)
if intersect == -1: intersect = "N"
pointIntersectList[rayCount].append(intersect)
#Find the intersection that was the closest for each ray.
for list in pointIntersectList:
try:
if allSame(list) == False:
minIndex, minValue = min(enumerate(list), key=operator.itemgetter(1))
srfHits[minIndex].append(1)
except: pass
#Sum up the lists and divide by the total rays to get the view factor.
for hitList in srfHits:
testPtViewFactor[zoneCount][pointCount].append(sum(hitList)/divisor)
return testPtViewFactor
def computeFloorReflect(testPts, testPtViewFactor, zoneSrfTypes, flrRefList):
# Set defaults and a list to be filled.
defaultRef = 0.2
zoneFlrReflects = []
includeOutdoor = False
if len(testPts) == len(flrRefList): includeOutdoor = True
#Compute the ground reflectivity from view factor calculations of ground surfaces.
for zoneCount, zonePts in enumerate(testPts):
zoneFlrReflects.append([])
for ptCount, pt in enumerate(zonePts):
ptViewFacs = []
ptRefs = []
if includeOutdoor == True:
if zoneCount != len(testPts) - 1:
for srfCount, srf in enumerate(zoneSrfTypes[zoneCount]):
if srf == 2 or srf == 2.25 or srf == 2.5 or srf == 2.75:
ptRefs.append(flrRefList[zoneCount][srfCount])
ptViewFacs.append(testPtViewFactor[zoneCount][ptCount][srfCount])
else:
for srfCount, srf in enumerate(zoneSrfTypes[zoneCount]):
if srf == 1 or srf == 1.5:
ptRefs.append(flrRefList[zoneCount][srfCount])
ptViewFacs.append(testPtViewFactor[zoneCount][ptCount][srfCount])
else:
for srfCount, srf in enumerate(zoneSrfTypes[zoneCount]):
if srf == 2 or srf == 2.25 or srf == 2.5 or srf == 2.75:
ptRefs.append(flrRefList[zoneCount][srfCount])
ptViewFacs.append(testPtViewFactor[zoneCount][ptCount][srfCount])
missingViewFac = 0.5 - sum(ptViewFacs)
ptViewFacs.append(missingViewFac)
ptRefs.append(defaultRef)
weightedFloorRef = 0
for refCount, ref in enumerate(ptRefs):
try:
weightedFloorRef = weightedFloorRef + ref*ptViewFacs[refCount]
except: pass
weightedFloorRef = weightedFloorRef * 2
zoneFlrReflects[zoneCount].append(weightedFloorRef)
return zoneFlrReflects
#If Honeybee or Ladybug is not flying or is an older version, give a warning.
initCheck = True
#Ladybug check.
if not sc.sticky.has_key('ladybug_release'):
initCheck = False
print "You should first let Ladybug fly..."
ghenv.Component.AddRuntimeMessage(w, "You should first let Ladybug fly...")
else:
try:
if not sc.sticky['ladybug_release'].isCompatible(ghenv.Component): initCheck = False
except:
initCheck = False
warning = "You need a newer version of Ladybug to use this compoent." + \
"Use updateLadybug component to update userObjects.\n" + \
"If you have already updated userObjects drag Ladybug_Ladybug component " + \
"into canvas and try again."
ghenv.Component.AddRuntimeMessage(w, warning)
#Honeybee check.
if not sc.sticky.has_key('honeybee_release'):
initCheck = False
print "You should first let Honeybee fly..."
ghenv.Component.AddRuntimeMessage(w, "You should first let Honeybee fly...")
else:
try:
if not sc.sticky['honeybee_release'].isCompatible(ghenv.Component): initCheck = False
except:
initCheck = False
warning = "You need a newer version of Honeybee to use this compoent." + \
"Use updateHoneybee component to update userObjects.\n" + \
"If you have already updated userObjects drag Honeybee_Honeybee component " + \
"into canvas and try again."
ghenv.Component.AddRuntimeMessage(w, warning)
#Start clocks to give a total calculation time report at the end
total_ms = None
total_fs = None
#Set the default to not generate the mesh.
buildMesh = False
if _buildMesh == None: pass
else: buildMesh = _buildMesh
#Check the data input.
checkData = False
if initCheck == True:
copyHBZoneData()
hb_zoneData = sc.sticky["Honeybee_ViewFacotrSrfData"]
if hb_zoneData[10] == True:
lb_preparation = sc.sticky["ladybug_Preparation"]()
hb_viewFactor = sc.sticky["honeybee_ViewFactors"]
hb_hive = sc.sticky["honeybee_Hive"]()
checkData, gridSize, distFromFloor, viewResolution, removeInt, sectionMethod, sectionBreps, includeOutdoor, constantTransmis, addShdTransmiss = checkTheInputs()
#Create a mesh of the area to calculate the view factor from.
geoCheck = False
if checkData == True and buildMesh == True:
start = time.clock()
goodGeo = prepareGeometry(gridSize, distFromFloor, removeInt, sectionMethod, sectionBreps, includeOutdoor, constantTransmis, addShdTransmiss, hb_zoneData)
if goodGeo != -1:
geoCheck, testPtsInit, viewFactorBrep, viewFactorMeshActual, zoneWireFrame, zoneSrfsMesh, zoneSrfNames, zoneOpaqueMesh, testPtZoneNames, testPtZoneWeights, ptHeightWeights, zoneInletInfo, zoneHasWindows, zoneBrepsNonSolid, includeOutdoor, zoneWindowMesh, zoneWindowTransmiss, outdoorPtHeightWeights, zoneWindowNames, flrRefList, zoneSrfTypes, finalAddShdTransmiss = goodGeo
total_ms = time.clock() - start
#Unpack the data trees of test pts and mesh breps so that the user can see them and get a sense of what to expect from the view factor calculation.
testPts = DataTree[Object]()
viewFactorMesh = DataTree[Object]()
shadingContext = DataTree[Object]()
closedAirVolumes = DataTree[Object]()
viewMeshFaces = DataTree[Object]()
for brCount, branch in enumerate(testPtsInit):
for item in branch: testPts.Add(item, GH_Path(brCount))
for brCount, branch in enumerate(viewFactorMeshActual):
for item in branch: viewFactorMesh.Add(item, GH_Path(brCount))
for brCount, branch in enumerate(viewFactorBrep):
for item in branch: viewMeshFaces.Add(item, GH_Path(brCount))
for brCount, branch in enumerate(zoneOpaqueMesh):
for item in branch: shadingContext.Add(item, GH_Path(brCount))
for brCount, branch in enumerate(zoneBrepsNonSolid):
for item in branch: closedAirVolumes.Add(item, GH_Path(brCount))
#If all of the data is good and the user has set "_runIt" to "True", run the shade benefit calculation to generate all results.
if checkData == True and _runIt == True and geoCheck == True and buildMesh == True:
start = time.clock()
viewVectors, skyViewVecs, newVecsAreas, skyViewVecsAreas = checkViewResolution(viewResolution, lb_preparation)
testPtViewFactor = main(testPtsInit, zoneSrfsMesh, viewVectors, includeOutdoor)
testPtSkyView, testPtBlockedVec, testPtBlockName = skyViewCalc(testPtsInit, zoneOpaqueMesh, skyViewVecs, skyViewVecsAreas, zoneHasWindows, zoneWindowMesh, zoneWindowTransmiss, zoneWindowNames)
outdoorNonSrfViewFac = []
if sectionMethod != 0 and includeOutdoor == True:
outdoorIsThere = True
outdoorNonSrfViewFac = checkOutdoorViewFac(testPtViewFactor[-1], testPtSkyView[-1])
else: outdoorIsThere = False
finalFloorRefList = computeFloorReflect(testPtsInit, testPtViewFactor, zoneSrfTypes, flrRefList)
total_fs = time.clock() - start
#Put all of the information into a list that will carry the data onto the next component easily.
viewFactorInfo = hb_viewFactor(testPtViewFactor, zoneSrfNames, testPtSkyView, testPtBlockedVec, testPtZoneWeights, \
testPtZoneNames, ptHeightWeights, zoneInletInfo, zoneHasWindows, outdoorIsThere, outdoorNonSrfViewFac, \
outdoorPtHeightWeights, testPtBlockName, zoneWindowTransmiss, zoneWindowNames, finalFloorRefList, \
constantTransmis, finalAddShdTransmiss)
viewFactorInfo = hb_hive.addNonGeoObjToHive(viewFactorInfo, ghenv.Component)
#Print out a report of calculation time.
print "_"
if total_ms != None: print str(round(total_ms, 3)) + " seconds were spent creating the view factor mesh."
if total_fs != None: print str(round(total_fs, 3)) + " seconds were spent calculating view factors."
#Hide the outputs that are not highly important.
ghenv.Component.Params.Output[5].Hidden = True
ghenv.Component.Params.Output[9].Hidden = True
ghenv.Component.Params.Output[10].Hidden = True
if _runIt == True:
ghenv.Component.Params.Output[6].Hidden = True
ghenv.Component.Params.Output[2].Hidden = False
else:
ghenv.Component.Params.Output[6].Hidden = False
ghenv.Component.Params.Output[2].Hidden = True
| mostaphaRoudsari/Honeybee | src/Honeybee_Indoor View Factor Calculator.py | Python | gpl-3.0 | 90,643 |
#!/usr/bin/env python
"""
Query the github API for the git tags of a project, and return a list of
version tags for recent releases, or the default release.
The default release is the most recent non-RC version.
Recent is a list of unqiue major.minor versions, where each is the most
recent version in the series.
For example, if the list of versions is:
1.8.0-rc2
1.8.0-rc1
1.7.1
1.7.0
1.7.0-rc1
1.6.2
1.6.1
`default` would return `1.7.1` and
`recent -n 3` would return `1.8.0-rc2 1.7.1 1.6.2`
"""
from __future__ import print_function
import argparse
import itertools
import operator
from collections import namedtuple
import requests
GITHUB_API = 'https://api.github.com/repos'
class Version(namedtuple('_Version', 'major minor patch rc')):
@classmethod
def parse(cls, version):
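"""Parse a git tag such as 'v1.8.0-rc2' into Version(1, 8, 0, 'rc2')."""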
version = version.lstrip('v')
version, _, rc = version.partition('-')
major, minor, patch = version.split('.', 3)
return cls(int(major), int(minor), int(patch), rc)
@property
def major_minor(self):
return self.major, self.minor
@property
def order(self):
"""Return a representation that allows this object to be sorted
correctly with the default comparator.
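For example, Version(1, 8, 0, 'rc1').order is (1, 8, 0, 0, 'rc1') while
Version(1, 8, 0, '').order is (1, 8, 0, 1), so an official release sorts
above its release candidates.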
"""
# rc releases should appear before official releases
rc = (0, self.rc) if self.rc else (1, )
return (self.major, self.minor, self.patch) + rc
def __str__(self):
rc = '-{}'.format(self.rc) if self.rc else ''
return '.'.join(map(str, self[:3])) + rc
def group_versions(versions):
"""Group versions by `major.minor` releases.
Example:
>>> group_versions([
Version(1, 0, 0),
Version(2, 0, 0, 'rc1'),
Version(2, 0, 0),
Version(2, 1, 0),
])
[
[Version(1, 0, 0)],
[Version(2, 0, 0), Version(2, 0, 0, 'rc1')],
[Version(2, 1, 0)],
]
"""
return list(
list(releases)
for _, releases
in itertools.groupby(versions, operator.attrgetter('major_minor'))
)
def get_latest_versions(versions, num=1):
"""Return a list of the most recent versions for each major.minor version
group.
"""
versions = group_versions(versions)
return [versions[index][0] for index in range(num)]
def get_default(versions):
"""Return a :class:`Version` for the latest non-rc version."""
for version in versions:
if not version.rc:
return version
def get_github_releases(project):
"""Query the Github API for a list of version tags and return them in
sorted order.
See https://developer.github.com/v3/repos/#list-tags
"""
url = '{}/{}/tags'.format(GITHUB_API, project)
response = requests.get(url)
response.raise_for_status()
versions = [Version.parse(tag['name']) for tag in response.json()]
return sorted(versions, reverse=True, key=operator.attrgetter('order'))
def parse_args(argv):
parser = argparse.ArgumentParser(description=__doc__)
parser.add_argument('project', help="Github project name (ex: docker/docker)")
parser.add_argument('command', choices=['recent', 'default'])
parser.add_argument('-n', '--num', type=int, default=2,
help="Number of versions to return from `recent`")
return parser.parse_args(argv)
def main(argv=None):
args = parse_args(argv)
versions = get_github_releases(args.project)
if args.command == 'recent':
print(' '.join(map(str, get_latest_versions(versions, args.num))))
elif args.command == 'default':
print(get_default(versions))
else:
raise ValueError("Unknown command {}".format(args.command))
if __name__ == "__main__":
main()
| alexandrev/compose | script/versions.py | Python | apache-2.0 | 3,819 |
# -*- coding: utf-8 -*-
# Copyright 2013, 2014, 2015, 2016, 2017, 2018 Kevin Reid and the ShinySDR contributors
#
# This file is part of ShinySDR.
#
# ShinySDR is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ShinySDR is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with ShinySDR. If not, see <http://www.gnu.org/licenses/>.
"""API for plugins, and related things.
This module contains objects and interfaces used by plugins to declare
the functionality they provide.
"""
# pylint: disable=signature-differs
# (pylint is confused by interfaces)
from __future__ import absolute_import, division, print_function, unicode_literals
from collections import namedtuple
import six
from twisted.plugin import IPlugin
from zope.interface import Attribute, Interface, implementer
from shinysdr.i.modes import IModeDef
from shinysdr.types import EnumRow
__all__ = [] # appended later
class IDemodulatorFactory(Interface):
def __call__(mode, input_rate, context):
"""
Returns a new IDemodulator.
        mode: unicode, the mode to be demodulated (should be one of the modes the factory/class was declared to support)
input_rate: float, sample rate the demodulator must accept
context: an IDemodulatorContext
May support additional keyword arguments as supplied by unserialize_exported_state.
"""
__all__.append('IDemodulatorFactory')
class IDemodulator(Interface):
"""
Demodulators may also wish to implement:
IDemodulatorModeChange
ITunableDemodulator
Additional constraints:
    The object must also be a GNU Radio block with one gr_complex input, and output as described by get_output_type().
"""
def get_band_shape():
"""
Returns a BandShape object describing the portion of its input signal which the demodulator uses (typically, the shape of its filter).
Should be exported, typically like:
@exported_value(type=BandShape, changes='never')
This is used to display the filter on-screen and to determine when the demodulator's input requirements are satisfied by the device's tuning.
"""
def get_output_type():
"""
Return the SignalType of the demodulator's output.
The output must be stereo audio, mono audio, or nothing. Note that stereo audio is represented as a vector of two floats, not as two output ports.
"""
__all__.append('IDemodulator')
class IDemodulatorContext(Interface):
def rebuild_me():
"""Request that this demodulator be discarded and an identically configured copy be created.
This is needed when something such as the output type of the demodulator changes; it may also be used any time constructing a new demodulator is more convenient than changing the internal structure of an existing one.
"""
def lock():
"""
Use this method instead of gr.hier_block2.lock().
This differs in that it will avoid acquiring the lock if it is already held (implementing a "recursive lock"). It is therefore suitable for use when the demodulator is being invoked in a situation where the lock may already be held.
"""
def unlock():
"""Use in pairs with IDemodulatorContext.lock()."""
def output_message(message):
"""Report a message output from the demodulator, such as in demodulators which handle packets rather than audio.
The message object should provide shinysdr.telemetry.ITelemetryMessage.
"""
def get_absolute_frequency_cell():
"""Returns a cell containing the original RF carrier frequency of the signal to be demodulated — the frequency the signal entering the demodulator has been shifted down from."""
class ITunableDemodulator(IDemodulator):
"""If a demodulator implements this interface, then there may be a arbitrary frequency offset in its input signal, which it will be informed of via the set_rec_freq method."""
def set_rec_freq(freq):
"""
Set the nominal (carrier) frequency offset of the signal to be demodulated within the input signal.
"""
__all__.append('ITunableDemodulator')
class IDemodulatorModeChange(IDemodulator):
"""If a demodulator implements this interface, then it may be asked to reconfigure itself to demodulate a different mode."""
def can_set_mode(mode):
"""
Return whether this demodulator can reconfigure itself to demodulate the specified mode.
If it returns False, it will typically be replaced with a newly created demodulator.
"""
def set_mode(mode):
"""
Per can_set_mode.
"""
__all__.append('IDemodulatorModeChange')
# TODO: BandShape doesn't really belong here but it is related to IDemodulator. Find better location.
# All frequencies are relative to the demodulator's input signal (i.e. baseband)
_BandShape = namedtuple('BandShape', [
'stop_low', # float; lower edge of stopband
'pass_low', # float; lower edge of passband
'pass_high', # float; upper edge of passband
'stop_high', # float; upper edge of stopband
'markers', # dict of float to string; labels of significant frequencies (e.g. FSK mark and space)
])
class BandShape(_BandShape):
@classmethod
def lowpass_transition(cls, cutoff, transition, markers=None):
if markers is None:
markers = {}
h = transition / 2.0
return cls(
stop_low=-cutoff - h,
pass_low=-cutoff + h,
pass_high=cutoff - h,
stop_high=cutoff + h,
markers=markers)
@classmethod
def bandpass_transition(cls, transition, low, high, markers=None):
if markers is None:
markers = {}
h = transition / 2.0
return cls(
stop_low=low - h,
pass_low=low + h,
pass_high=high - h,
stop_high=high + h,
markers=markers)
__all__.append('BandShape')
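# Worked example (a sketch, not from the original source): a lowpass shape
# with cutoff=1000 Hz and transition=200 Hz puts the passband edges 100 Hz
# inside the cutoff and the stopband edges 100 Hz outside it:
#   BandShape.lowpass_transition(1000, 200)
#   -> BandShape(stop_low=-1100.0, pass_low=-900.0,
#                pass_high=900.0, stop_high=1100.0, markers={})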
class IModulatorFactory(Interface):
def __call__(mode, context):
"""
Returns a new IModulator.
        mode: unicode, the mode to be modulated (should be one of the modes the factory/class was declared to support)
context: always None, will later become IModulatorContext when that exists.
May support additional keyword arguments as supplied by unserialize_exported_state.
"""
class IModulator(Interface):
"""
Additional constraints:
The object must also be a GNU Radio block with one gr_complex output, and input as described by get_input_type().
"""
def can_set_mode(mode):
"""
Return whether this modulator can reconfigure itself to modulate the specified mode.
If it returns False, it will typically be replaced with a newly created modulator.
"""
def set_mode(mode):
"""
Per can_set_mode.
"""
def get_input_type():
"""
Return the SignalType of the modulator's required input, which must currently be mono audio at any sample rate.
"""
def get_output_type():
"""
Return the SignalType of the modulator's output, which must currently be IQ at any sample rate.
"""
__all__.append('IModulator')
class IHasFrequency(Interface):
# TODO: document this
def get_freq():
pass
__all__.append('IHasFrequency')
@implementer(IPlugin, IModeDef)
class ModeDef(object):
    # The Twisted plugin system caches whether-a-plugin-class-was-found
    # permanently, so we need to avoid _not_ having a ModeDef if the plugin
    # has some sort of dependency it checks -- thus the 'available' flag can
    # be used to hide a mode while still having an _IModeDef.
def __init__(self,
mode,
info,
demod_class,
mod_class=None,
unavailability=None):
"""
mode: String uniquely identifying this mode, typically a standard abbreviation written in uppercase letters (e.g. "USB", "WFM").
info: An EnumRow object with a label for the mode, or a string.
The EnumRow sort key should be like the mode value but organized for sorting with space as a separator of qualifiers (e.g. "SSB U", "FM W").
demod_class: Class (or factory function) to instantiate to create a demodulator for this mode. Should provide IDemodulatorFactory but need not declare it.
mod_class: Class (or factory function) to instantiate to create a modulator for this mode. Should provide IModulatorFactory but need not declare it.
unavailability: This mode definition will be ignored if this is a string rather than None. The string should be an error message informative to the user (plain text, significant whitespace).
"""
if isinstance(unavailability, bool):
raise Exception('unavailability should be a string or None')
self.mode = six.text_type(mode)
self.info = EnumRow(info)
self.demod_class = demod_class
self.mod_class = mod_class
self.unavailability = None if unavailability is None else six.text_type(unavailability)
@property
def available(self):
return self.unavailability is None
__all__.append('ModeDef')
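# Minimal usage sketch (hypothetical, not part of this module): a plugin
# module would typically instantiate ModeDef at module level so the Twisted
# plugin system can discover it. The names _MyDemodulator, _have_dependency
# and 'XYZ' below are made up for illustration.
#
#     pluginDef_xyz = ModeDef(
#         mode='XYZ',
#         info='Example mode',
#         demod_class=_MyDemodulator,
#         unavailability=None if _have_dependency else 'Install xyzlib.')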
class _IClientResourceDef(Interface):
"""
Client plugin interface object.
This interface is needed to make the plugin system work and is not intended to be reimplemented; just use ClientResourceDef.
"""
key = Attribute("""A unique string, prefixed by the plugin's package name.""")
resource = Attribute(
"""A twisted.web.resource.Resource to be added to the web server.
Must not provide any authority (e.g. just static CSS/JS files are OK).
""")
load_css_path = Attribute("""Optional path relative to within `resource` to load as CSS.""")
load_js_path = Attribute("""Optional path relative to within `resource` to load as JavaScript.""")
@implementer(IPlugin, _IClientResourceDef)
class ClientResourceDef(object):
def __init__(self, key, resource, load_css_path=None, load_js_path=None):
"""
key: A unique string, prefixed by the plugin's package name.
resource: A twisted.web.resource.Resource to be added to the web server.
Must not provide any authority (e.g. just static CSS/JS files are OK).
load_css_path: Optional path relative to within `resource` to load as CSS.
load_js_path: Optional path relative to within `resource` to load as JavaScript.
"""
self.key = key
self.resource = resource
self.load_css_path = load_css_path
self.load_js_path = load_js_path
__all__.append('ClientResourceDef')
| kpreid/shinysdr | shinysdr/interfaces.py | Python | gpl-3.0 | 11,304 |
# -*- coding: utf-8 -*-
########################## Copyrights and license ############################
# #
# Copyright 2011-2015 Christian Lupien <christian.lupien@usherbrooke.ca> #
# #
# This file is part of pyHegel. http://github.com/lupien/pyHegel #
# #
# pyHegel is free software: you can redistribute it and/or modify it under #
# the terms of the GNU Lesser General Public License as published by the #
# Free Software Foundation, either version 3 of the License, or (at your #
# option) any later version. #
# #
# pyHegel is distributed in the hope that it will be useful, but WITHOUT #
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or #
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public #
# License for more details. #
# #
# You should have received a copy of the GNU Lesser General Public License #
# along with pyHegel. If not, see <http://www.gnu.org/licenses/>. #
# #
##############################################################################
from __future__ import absolute_import
import numpy as np
import scipy.linalg as la
import types
# TODO error handling
def oneDpoly(X,m,rank=None):
""" m est le nombre de termes à créer.
if rank is given then the highest power will be rank
(and m is not used)
"""
if rank is not None: m=rank+1
power=np.arange(m)
#xs=X.shape+(1,) # add a broadcast dimension
#return X.reshape(xs) ** power
return X[...,np.newaxis] ** power
DefaultPoly = oneDpoly
def twoDpoly(X,m=3,rank=None):
""" The first dimension of X as size 2 and represent the (x, y) coordinates
On return the first dimensions is removed and a new dimension is
added at the end of lenght m
if rank is given it is the rank of the polynomial and it will
overide the m value
The default m value is for a plane: z= a + bx + cy
dxy would introduce some quadratic term.
The terms for a certain rank need to from a closed group under
rotation of the plane. For example. a rotation of 45 deg to give
x', y' from x,y makes x= x'/s-y'/s and y=y'/s+x'/s with s=sqrt(2)
Then z=a+bx+cy+dxy becomes z=a+(b/s+c/s)x' + (c/s-b/s)y' +
(d/s^2)(x'^2 - y'^2)
Therefore to be a closed group you need to add
ex^2 + fy^2 and this gives the 2nd rank 2D polynomial
"""
if rank is not None: m= (rank+1)*(rank+2)/2
maxp=int(np.ceil(np.sqrt(m)))
mr=range(maxp)
powercomb = [[px,py] for px in mr for py in mr]
# Sort in place first with the smallest sum of power
# then with the smallest max power first i.e
# [1,1] before [0,2] and [2,0]
powercomb.sort(key=lambda x: [sum(x),max(x)])
powercomb=np.array(powercomb[:m]) # pick only first m powers
return X[0][...,np.newaxis]**powercomb[...,0] * X[1][...,np.newaxis]**powercomb[...,1]
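# Illustrative note (not in the original): for m=6 (rank 2) the powers kept,
# in order, are (0,0),(0,1),(1,0),(1,1),(0,2),(2,0), i.e. the terms
# 1, y, x, x*y, y**2, x**2 -- the closed group described in the docstring.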
def gen_polyeval(X, pf, param=None, sel=None):
""" Calls func and returns created polynomial
here pf is a sequence (p,func)
Pour le tableau x(N,...,M,T)=func(X,m,param) et p(T,S,P,Q,...),
m est T=p.shape[0]
ou T est le nombre de parametre du fit et S,P,Q ... si présent
sont le nombre de lissage différents réalisé.
Le résultat est Y(N,...,M,S,P,Q,...) = x(N,...,M,T+) * p(T+,S,P,Q,...)
(le + indique a sum product: 2D produit matricielle)
sel si definie fera ls sélection p[:,sel]
sel can be the result of np.index_exp
which is detected if sel is a tuple
Les premières dimensions représenntent des conditions différentes
(positions xy différentes ...) les dernières le résultat de différents
lissage (des layers de carte 3D)
DefaultPoly is the function used by default if func is
None (initialized to oneDpoly
"""
p,func = pf
if not func: func = DefaultPoly
m=p.shape[0]
if sel is not None:
if isinstance(sel,tuple):
p=p[ (slice(None),)+sel ]
else:
p = p[:,sel]
return np.tensordot(func(X,m,param),p,axes=(-1,0))
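# Usage sketch (illustrative, using the oneDpoly basis defined above):
#   >>> x = np.linspace(0., 1., 5)
#   >>> gen_polyeval(x, (np.array([1., 2., 3.]), oneDpoly))
#   evaluates 1 + 2*x + 3*x**2 at each point of x.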
def lstsq_er(X,Y,cond=-1):
# following numerical recipes
# X= U S Vh
# U, Vh are unitary matrices (orthogonal): So U U^T =1
# s is diag
# For X (N,M),
# then U, Vh are (N,K), (K,M)
# s is (K) K=min(N,M) (really should be (K,K) but only keep diag elements)
# Here M is the number of parameters so M<N so K=M
# Y is either (N,) or (N,P)
U,s,Vh = la.svd(X, full_matrices=False)
TOL=1e-13 # for double: Also look at np.finfo(0.).eps and .resolution (base 10 rounded of .eps)
if cond == -1: cond=TOL
    # s[0] should be s.max()
s = np.where(s>cond*s[0], s,s*0.)
invs = np.where(s!=0, 1./s, s*0.)
    # The solution for Y (N,) or (N,P)
# p = Vh^T S^-1 U^T Y
#invsd = np.diag(invs)
#p = np.dot(Vh.T, np.dot(invsd, np.dot(U.T, Y) ) )
p = np.dot(Vh.T, (invs*(np.dot(U.T, Y)).T).T ) # faster maybe...
# covar = (A^T A)^-1 = V (S^T S)^-1 V^T
# (S^T S)^-1 = 1/diag^2 (the square of the inverse of the diag elements)
#invsd2 = invsd**2
#covar = np.dot(Vh.T, np.dot(invsd2, Vh) # covar is MxM
covar = np.dot(Vh.T, Vh*(invs**2)[:,None]) #faster than above (one less dot)
pe = np.sqrt(covar.diagonal())
pei = 1./pe
covar = covar*pei[None,:]*pei[:,None]
resids = np.sum( (Y.T-np.dot(X,p).T)**2, axis=-1)
rank = len(np.flatnonzero(s))
return p, resids, pe, (U,s,Vh,rank, covar)
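# Quick check (a sketch, not from the original): fitting y = 1 + 2*x exactly
# recovers the parameters with ~zero residuals.
#   >>> x = np.arange(5.)
#   >>> A = oneDpoly(x, 2)              # columns: x**0, x**1
#   >>> p, resids, pe, _ = lstsq_er(A, 1. + 2.*x)
#   >>> np.allclose(p, [1., 2.])
#   True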
def gen_polyfit(X,Y,m,s=None,func=None, param=None,adjust=None, p0=None, filter=None, errors=True, rcond=-1):
"""
This calcutes the fit of a general polynomial expression
method in yorick defaults to LUsolve, otherwise it is SVsolve
Here we use scipy.linalg.lstsq
La forme de x=func(X,m,param) est (M,..,N,T)
ou T est la liste de parametres de la reponse.
Y est (M,...,N,S), ou S est optionnel et represente des
solutions supplementaires
La reponse est soit 1D ou 2D
s est l'erreur standard. Elle ajuste le poid du lissage.
s est scalaire (effet seulement sur les erreurs, pas sur le lissage)
ou s (M,...,N)
ou s (M, ..., N, S)
Attention, dans ce dernier cas le lissage sera beaucoup plus lent
puisque tous les S fit devront être fait séparément.
Dans ce cas Y ne peut pas avoir
Si s n'est pas spécifié, c'est l'équivalent de s=1.
Le lissage est le résultat de minimiser:
\chi^2 = \sum_{i=1}^N {\left(\frac{y_i - f(x_i)}{s_i}\right)}^2
En prenant les dérivés avec p (les paramètres) et en les mettant =0
On trouve le minimum.
puisque f(x_i) = p_0 x_i^0 + p_1 x_i^1 ...
où x^n ne veut pas nécessairement duire la puissance n de x
mais bien la combinaison n des éléments du vecteur x_i
(x_i est une vecteur de paramètre indépendant pour le point i)
donc d chi^2/dp nous donne les équations
X p = Y
où X_k,l = sum_i (x_i^k x_i^l/s_i^2)
p_l sont les paramètres
Y_k = sum_i (y_i x_i^k/s_i^2)
C'est le système d'équations que lstsq résous de façon interne.
Ce qu'on donne à lstsq est X' p = Y'
où X'_i,l = x_i^l/s_i
p_l commen ci-haut
Y'_i = y_i/s_i
(La définition de lstsq utilise s_i =1)
Pour obtenir X et Y de X' et Y' lstsq fait
X=X'^T X', Y=X'^T Y' où ^T est la transpose
Donc si on redéfinie X' et Y' comme ci-haut (divise par s_i)
on exécutera une lissage linéaire avec poids variables.
filter is a M,.., N matrix where 1 selects the data point
and 0 deselects it
(actually it is selected if >0.5)
Si Y(X) est un vecteur (Disons Y=(Yx, Yy) ou Yx=fx(x,y) et Yy=fy(x,y)
pour un vecteur Y 2D dépédant d'un vecteur xy=(x,y) aussi 2D.
Si Yy et Yx sont indépendant ils peuvent être lissé séparément.
Si ils utilisent la même fonction mais des paramètres différents alors
ils peuvent être lissé en même temps (Yx et Yy forment la dimension S
de Y)
Sinon, Yx et Yy doivent être combinés dans le même Y (sans la dimention S
et le vecteur X ne sera pas le même pour les éléments Yx et Yy.
Certains des éléments de X seront mis à zéro si il ne s'applique pas aux
deux avec le même paramètre.
adjust is either an array(list) or a selector to pick the
parameters to adjust (using their index).
You can set the value of the unused parameters with p0
it should be a vector of shape T or T,S
rcond is the same as in lstsq
La fonction retourne: pf,resids,pe, extras
où extras est un dict: chiNorm, sigmaCorr, rank, sv, covar (voir lstsq)
pe sont les erreurs sur les parametres
sv are the singular values
= [] si errors=False
même chose pour covar (covar renorm, diag=1)
Si errors == 1 or True: mode auto (corrige pe si il n'y a pas de s)
sinon retourne le pe calculé
Pour errors ce sont des flags:
1: Calculer les erreurs
2: Mode non-automatique
4: corrige le pe (si mode non-auto)
8: pe peut avoir dimension differente de p
(sinon p et pe ont les même dimensions)
Toutes les combinaisons (sommes) sont permises.
mode auto (corrige pe si il n'y a pas de s
sinon retourne le pe calculé)
Valeur défaut: 1 ou True (erreurs auto et même forme que p)
Pas d'erreurs: 0 ou False
Erreurs non-corrigées: 1+2 = 3
Erreurs corrigés: 1+2+4 = 7
Erreurs corrigées signifie que l'on ajuste les s
pour obtenir chiNorm=1 (chiNorm= chi/dof), chi=resids
où pf est (p,func) et peut être utilisé avec gen_polyeval
Exemple: pour function Y= (a*x+b*y+c,d*x+e*y+c),
donc Y1=a*x1_b*y1+c, Y2=d*x1+e*y1+c,
Y3=a*x2_b*y2+c, Y4=d*x2+e*y2+c ...
donc X=(x,y,1,x,y) (pour les paramètres (a,b,c,d,e)
pour Y1: X1=(x1,y1,1,0,0), Y2: X2=(0,0,1,x1,y1) ...
"""
if not func: func = DefaultPoly
# need to check this, deal with s (errors), method and adjust
x=func(X,m,param)
m=x.shape[-1]
xx=x.reshape((-1,m))
if x.ndim == Y.ndim: #multiple solutions
nfits = Y.shape[-1]
y=Y.reshape((-1, nfits))
multi = True
else: #single solution
y=Y.ravel()
multi = False
nfits = 0
errors = int(errors) # True ->1, False ->0
if errors&1 == 0 : errors = 0
if not errors: covar=pe = []
elif errors&2 == 0: # automatic mode
if s is None: errors |= 4 # fix pe
else: errors &= ~4 #keep pe
needloop=False
if s is not None:
s=np.asarray(s)
ss=s.shape
if s.ndim == 0:
#scalar, only changes error calc (chi square)
s=s.reshape((1,1))
elif s.ndim == x.ndim-1:
# Same errors for all y sets
s=s.ravel()[:,None]
elif s.ndim == Y.ndim: # and s.ndim == x.ndim
# different errors for every y sets
s=s.reshape((-1, nfits))
needloop = True
else:
raise ValueError, 'shape mismatch: s is not a valid shape'
if adjust is not None:
pind = np.arange(m)
adjust = pind[adjust] # in case adjust is a selector
#we make sure we don't change the order, and don't repeat
sel = np.intersect1d(pind, adjust)
mm = len(sel)
xo = xx
xx = xo[:,sel]
if p0 is not None:
p0 = np.asarray(p0) # if necessary, turn list into array
if p0.shape[0] != m:
raise ValueError, 'shape mismatch: p0 is not a valid shape'
            # move the unadjusted parameters from the left-hand side to the
            # right-hand side of the equation
unsel = np.setdiff1d(pind, adjust)
if nfits != 0 and p0.ndim == 1:
p0 = p0[:,None]
if len(unsel)>0:
y = y - np.tensordot(xo[:,unsel],p0[unsel],axes=(-1,0))
else: mm = m
ind=slice(None)
if filter is not None:
ind=np.flatnonzero(filter>0.5)
if len(ind) == 0:
ind = slice(None)
if needloop:
p=np.zeros((mm, nfits))
if errors:
pe=np.zeros((mm, nfits))
covar=np.zeros((mm,mm, nfits))
resids=np.zeros(nfits)
sv=np.zeros((mm, nfits))
for i in xrange(s.shape[1]):
xxs=xx[ind]/s[ind,i][:,None]
ys=y[ind,i]/s[ind,i]
if not errors:
p[:,i],resids[i],rank,sv[:,i] = la.lstsq(xxs,ys,cond=rcond)
else:
p[:,i],resids[i], pe[:,i], (foo1,sv[:,i],foo2,rank,covar[:,:,i]) = lstsq_er(xxs,ys,cond=rcond)
else:
if s is not None:
xx/=s
if multi:
y=y/s
else:
y=y/s[:,0]
xx=xx[ind]
ys=y[ind]
if not errors:
p,resids,rank,sv = la.lstsq(xx,ys,cond=rcond)
else:
p,resids,pe, (foo1,sv,foo2,rank,covar) = lstsq_er(xx,ys,cond=rcond)
if adjust is not None:
ptmp = p
if nfits != 0:
p=np.zeros((m,nfits))
else:
p=np.zeros(m)
p[sel]=ptmp
if p0 is not None:
p[unsel] = p0[unsel]
if errors:
petmp = pe
pes=list(pe.shape)
pes[0] = m
pe = np.zeros(pes)
pe[sel] = petmp
cvt = covar
cvts = list(covar.shape)
cvts[0] = cvts[1] = m
covar=np.zeros(cvts)
covar[sel[:,None],sel] = cvt
    # rank should be the same as mm
chiNorm = resids/(ys.shape[0]-mm) #this assumes the given errors are correct
# sigmaCorr is a correction factor that should multiply the given s
    # Since without a given s the calculations assume s=1, this is simply the
    # estimate of what s should have been in that case (to give the proper chi^2)
sigmaCorr = np.sqrt(chiNorm)
if errors&4:
if nfits>0 and pe.ndim==1:
pe=pe[:,None]
pe = pe *sigmaCorr
if errors and not errors&8: # force same shape
if nfits>0 and pe.ndim==1:
pe = pe[:,None] + np.zeros(nfits)
extras = dict(chiNorm=chiNorm, sigmaCorr=sigmaCorr,rank=rank,sv=sv,covar=covar)
return ((p,func), resids, pe, extras)
def rankdata(x, avg=True):
"""
Returns the rank (order from 1 to n) of the n elements of x
    When avg = True (default), then for x values that are equal,
    it returns the average rank.
    X can be either of shape (N,) or (N,M). In that second case,
    the rank is obtained along the first dimension only,
    i.e. the rank operation is repeated for each x[:,i].
    It is faster and accepts more dimensions than scipy's version.
See also: scipy.stats.rankdata
"""
xshapeOrg = x.shape
if x.ndim==1:
x= x[:,None]
#x is now 2d
sind = x.argsort(axis=0)
n = x.shape[0]
ranklist = np.arange(n)*1. + 1 # make it floats and starting at 1 not 0
nm = x.shape[1]
sind = (sind, np.arange(nm)[None,:])
ranklist = ranklist[:,None]
rank = np.empty(x.shape)
rank[sind] = ranklist
if avg: # deal with repeats
same = np.diff(x[sind],axis=0) == 0.
# add a row of False before, and a row of False after
falserow = np.zeros(nm)!= 0.
same = np.r_['0,2,1', falserow, same, falserow]
for i in xrange(nm):
ind = sind[0][:,i]
samei = same[:,i]
beg = samei[1:]>samei[:-1] # goes from False to True
end = samei[1:]<samei[:-1] # goes from True to False
begi = beg.nonzero()[0]
endi = end.nonzero()[0]
assert len(begi) == len(endi), 'begi end endi should be same length'
for b,e in zip(begi,endi):
sel = ind[b:e+1]
val = (b+e)/2.+1
rank[sel,i] = val
return rank.reshape(xshapeOrg)
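# Example (illustrative, matches the docstring above): ties get the average
# of their basic ranks.
#   >>> rankdata(np.array([0.1, 0.5, 0.1, -0.2]))
#   array([ 2.5,  4. ,  2.5,  1. ])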
def report(X,Y,pf,func=None, s=1., param=None, adjust=None, filter=None):
""" Calculate a bunch of fit quality numbers.
Parameters are the same as for gen_polyfit.
pf is the result of a polynomial fit or just the parameters
    for the func.
Returns a dict with:
Chi square : chisq
Chi square /dof : chisqNorm
R square : R2
R : R
R square adjust : R2adjust
Pearson's r : pearson
Spearman's rank : spearman
r* : rstar
      where R = sqrt(max(R2,0)), R2 range (-inf, 1]
range [0,1] if fit contains a constant term
(which is used by KaleidoGraph which returns +inf when R2<0)
R2 = 1 - SSE/SSY
            Also called coefficient of determination
where SSE is sum of squares of errors
SSY is sum of squares of Y-Yavg
            can be less than 0 if the fit func does not include
            a constant term (to at least do as well as yavg).
R2 close to 1 means the fit goes through the points
R2 always increases with more fitting parameter.
see 15.2 of Numerical Recipes C++ 2nd
R2adjust = 1 - (SSE/dofe) / (SSY/dofy)
      where dofy is the degrees of freedom for the unbiased estimator = N-1
dofe = N-p
for p the number of fit parameters (including a constant term.)
This value should not increase when new parameters are added unless
they are a useful parameter.
see http://en.wikipedia.org/wiki/Coefficient_of_determination
           Applied linear statistical models, 3rd, J. Neter, W. Wasserman
W.H. Kutner section 7.5
Pearson's r = {Sum_i (x_i-xavg)(y_i-yavg)}/
sqrt{ [Sum_i (x_i-xavg)^2] [Sum_i (y_i - yavg)^2] }
range [-1,1]
Also called linear correlation coefficient or
product-moment correlation
It is +1 or -1 for a perfectly linear data set (data is on
a y=mx+b line) The correlation is between (y-yavg) and
(x-xavg) which corrrespond: yavg = m xavg +b
so y - yavg = mx +b - mxavg-b = m(x-avg)
See 14.5 of Numerical Recipes
Spearman's rank:
         Same as Pearson's except replace x and y by their rank.
So x_i -> rank(x_i), xavg=Avg(rank(x_i)) ...
For x_i = x_j = x_k ... we give them the same rank
which is the average of the basic rank.
This keeps the sum (and avg) a constant.
i.e. for 0.1, 0.5, 0.1, -0.2 we have
rank(0.5) = 4, rank(-0.2) = 1, rank(0.1) = 2.5
(basic rank for both 0.1 was 2 and 3)
         It is a more robust correlator than Pearson's r
See 14.6 of Numerical Recipes
14.8 of Introduction to probability and mathematical
statistics, 2nd, Bain, Engelhardt
Note that Pearson and Spearman values only make sense when x and
y are 1D vectors.
rstar:
Similar to Pearson but with y-yfit replacing x-xavg:
rstar = {Sum_i (y_i-f(x_i))(y_i-yavg)}/
sqrt{ [Sum_i (y_i-f(x_i))^2] [Sum_i (y_i - yavg)^2] }
with yfit = f(x_i) is the result of the fit evaluated
with the best fit parameters.
range [-1, 1]
         This is a correlator between the fit error and the data.
For a perfect fit it goes to 0.
Under certain conditions (see below) r = sqrt(1-R^2)
Note that chisq (chisqNorm, R2, R, R2adjust) pearson and rstar are
all extended in a conventional way to handle variable s_i.
Averages become weighted averages...
For a linear fit (y=mx+b):
R^2 = pearson^2 = 1-rstar^2
In the more general case where Sum_i (y_i-f(x_i))=0 and
     Sum_i (y_i-f(x_i)) f(x_i) = 0 then
R^2 = 1-rstar^2
Since least-square fitting finds solution for
Sum_i (y_i-f(x_i)) df(x_i)/dpj = 0
The requirements need f to have one term independent of x,
and all the terms reproducible from a constant multiplied by
a derivative.
     So f must look like: (p1, p2, ... are fit parameters)
p1 + p2*x
p1^2 + p2*exp(p3*x)
p1*p2+p2*x^2+p2*x^3+p3*x^4
but NOT of the form of
p1*(1+x^2) # no constant term: df/dp1 = 1+x^2
10+p1*x # no constant term: df/dp1 = 0 + x
p1+x^p2 # x*p2 is not reproduced by any df/dpi
p1+p2*x+p2^2*x^2 # 2nd and 3rd term not reproduced by any df/dpi
See also: scipy.stats.spearmanr, pearsonr
"""
ret = {}
if len(pf) == 2 and isinstance(pf[1], types.FunctionType):
yfit = gen_polyeval(X,pf,param=param)
p = pf[0]
else:
yfit = func(X,pf,param=param)
p = pf
m = p.shape[0] # number of parameters
s = np.asarray(s) # make sure it is an array
if p.ndim == 2:
nfits = p.shape[1]
if s.ndim == Y.ndim and s.shape[-1]!=1:
s = s.reshape((-1,nfits))
elif s.ndim > 0:
s = s.reshape((-1,1))
Y = Y.reshape((-1,nfits))
yfit = yfit.reshape((-1,nfits))
else:
nfits = 0
Y = Y.reshape((-1,))
yfit = yfit.reshape((-1,))
if s.ndim > 0:
s = s.reshape((-1,))
ind=slice(None)
if filter is not None:
# this needs to match with gen_polyfit
ind=np.flatnonzero(filter>0.5)
if len(ind) == 0:
ind = slice(None)
if filter.ndim == X.ndim:
X = X.ravel()[ind]
else: # filter is smaller than X, like
# for twoDpoly where X is [2, ...]
X = X.reshape((-1,filter.size))[:,ind]
X = X.ravel()
Y = Y[ind, ...]
yfit = yfit[ind, ...]
if s.ndim > 0 and s.shape[0]>1:
s= s[ind, ...]
else:
X = X.ravel()
Nx = X.size
N = Y.shape[0]
# X is now 1D always, even when it should not.
    # because the correlation coefficients don't really
    # make sense there anyway. Need to invent new ones.
if adjust is not None:
baseAdj = np.arange(m)
adjust = baseAdj[adjust] # in case adjust is a selector
#we make sure we don't change the order, and don't repeat
#could probably use unique1d if it is sorted.
m = len(np.intersect1d(baseAdj, adjust))
#w = 1./s
w = (1./s)**2
# weighted average: yavg = (sum wi yi)/sum wi
    # unweighted: wi = 1
    # we need to deal properly with the case
    # where wi are all the same (just one element broadcast)
# we can do that with: yavg = mean(w*y)/mean(w)
# where mean(x) = (sum_i x_i) / N
# Which works fine in either case (Nw=1 or =Ny)
wavg = np.mean(w,axis=0)
Yavg = np.mean(Y*w,axis=0)/wavg # or np.average
Yfd = (Y-yfit)
chisq = np.sum(Yfd**2 * w, axis=0)
chisqNorm = chisq/(N-m)
ret['chisq'] = chisq
ret['chisqNorm'] = chisqNorm
Yad = (Y - Yavg)
wSSya = np.sum((Yad**2*w),axis=0)
R2 = 1 - chisq/wSSya
ret['R2'] = R2
ret['R'] = np.sqrt(np.maximum(0,R2))
R2adjust = 1 - chisqNorm/(wSSya/(N-1))
ret['R2adjust'] = R2adjust
# r star
rstar = np.sum(Yfd*Yad*w,axis=0)/np.sqrt(chisq*wSSya)
ret['rstar'] = rstar
if Nx != N:
ret['pearson'] = None
ret['spearman'] = None
else:
# pearson
if nfits > 0:
Xavg = np.mean(X[:,None]*w,axis=0)/wavg
Xad = (X[:,None]-Xavg)
else:
Xavg = np.mean(X*w)/wavg
Xad = (X - Xavg)
wSSxa = np.sum(Xad**2*w,axis=0)
pearson = np.sum(Xad*Yad*w,axis=0)/np.sqrt(wSSxa*wSSya)
ret['pearson'] = pearson
# spearman, don't consider the sigma's (weight)
Xrank = rankdata(X)
Yrank = rankdata(Y)
rankavg = (N+1.)/2 # 1,2,3 -> 2, 1.5, 1.5,3,4 ->2.5 ...
Xrd = Xrank-rankavg
if nfits > 0: Xrd = Xrd[:,None]
Yrd = Yrank-rankavg
spearman = np.sum(Xrd*Yrd,axis=0)/ \
np.sqrt(np.sum(Xrd**2,axis=0)*np.sum(Yrd**2,axis=0))
ret['spearman'] = spearman
return ret
# test/example routine
if __name__ == "__main__":
from matplotlib.pylab import figure, clf, plot, legend, show, ylim
#from pretty import pprint
from pprint import pprint
N=500
x=np.linspace(0,1,N)
y=3+5*x**2+np.random.randn(N)*.1
y[200]=100
figure(1)
clf()
plot(x,y,label='data')
ss=y*0+1.
ER=True
#ER=False
(pf,res,pe,extras) = gen_polyfit(x,y,3,errors=ER)
plot(x,gen_polyeval(x,pf),'g',label='fit no s')
pprint (('fit no s',pf[0],pe,res,extras))
pprint (report(x,y,pf))
(pf,res,pe,extras) = gen_polyfit(x,y,3,s=10,errors=ER)
plot(x,gen_polyeval(x,pf),'r',label='fit constant s')
pprint (('fit constant s',pf[0],pe,res,extras))
pprint ( report(x,y,pf,s=10) )
ss[200]=100
(pf,res,pe,extras) = gen_polyfit(x,y,3,s=ss,errors=ER)
plot(x,gen_polyeval(x,pf),'k', label='fit with s')
pprint (( 'fit with s',pf[0],pe,res,extras ))
pprint ( report(x,y,pf,s=ss) )
legend()
ylim(0,10)
# compare with leastsq
fn = lambda p,x,y,s: (y-gen_polyeval(x,(p,oneDpoly)))/s
from scipy.optimize import leastsq
(pf,res,pe,extras) = gen_polyfit(x,y,3,s=ss,errors=3)
p0 = pf[0]*2.
rr=leastsq(fn, p0, args=(x,y,ss), full_output=True)
pre = np.sqrt(rr[1].diagonal())
print '========== non linear fit start ========='
pprint (( 'polyfit', pf[0], pe, extras['covar'], (extras['covar']*pe*pe[:,None]).round(4) ))
pprint ( report(x,y,pf,s=ss) )
pprint (( 'non linear', rr[0],pre, rr[1]/pre/pre[:,None], rr[1].round(4) ))
pprint ( report(x,y,rr[0],s=ss,func=lambda x,p,param=None:gen_polyeval(x,(p,oneDpoly))) )
print '========== non linear fit end ========='
figure(2)
clf()
figure(3)
clf()
figure(4)
clf()
xx=x+np.array([0,1.01])[:,None]
yy=np.zeros((2,N,3))
yy[:,:,0]=4+6*xx**2+np.random.randn(2,N)*.1
yy[:,:,1]=1+2*xx**2+np.random.randn(2,N)*.1
yy[...,2]=5+9*xx+np.random.randn(2,N)*.2
yy[1,20,:]=-100
yy[0,N-50,2]=200
figure(2)
plot(xx.T,yy[...,0].T,'b',label='data')
figure(3)
plot(xx.T,yy[...,1].T,'b',label='data')
figure(4)
plot(xx.T,yy[...,2].T,'b',label='data')
sss=yy*0+1.
((p,f),res,pe,extras) = gen_polyfit(xx,yy,4,errors=ER)
figure(2)
plot(xx.T,gen_polyeval(xx,(p[:,0],f)).T,'g',label='fit no s')
figure(3)
plot(xx.T,gen_polyeval(xx,(p[:,1],f)).T,'g',label='fit no s')
figure(4)
plot(xx.T,gen_polyeval(xx,(p[:,2],f)).T,'g',label='fit no s')
pprint (( 'fit no s',p,pe,res,extras ))
pprint ( report(xx,yy,(p,f)) )
((p,f),res,pe,extras) = gen_polyfit(xx,yy,4,s=10,adjust=[0,0,2],
p0=np.array([[-1,-20,-3,0],[-.1,-.2,-.3,0],[11,9,13,0]]).T,errors=ER)
figure(2)
plot(xx.T,gen_polyeval(xx,(p[:,0],f)).T,'r',label='fit constant s, adj0,2')
figure(3)
plot(xx.T,gen_polyeval(xx,(p[:,1],f)).T,'r',label='fit constant s, adj0,2')
figure(4)
plot(xx.T,gen_polyeval(xx,(p[:,2],f)).T,'r',label='fit constant s, adj0,2')
pprint (( 'fit constant s',p,pe,res,extras ))
pprint ( report(xx,yy,(p,f),s=10,adjust=[0,0,2]) )
sss[1,20,:]=100
sss[0,N-50,2]=-100 # negative sigma does not make sense, but code should not care
(pf,res,pe,extras) = gen_polyfit(xx,yy,4,s=sss,errors=ER)
figure(2)
plot(xx.T,gen_polyeval(xx,pf,sel=0).T,'c', label='fit with s')
figure(3)
plot(xx.T,gen_polyeval(xx,pf,sel=1).T,'c', label='fit with s')
figure(4)
plot(xx.T,gen_polyeval(xx,pf,sel=2).T,'c', label='fit with s')
pprint (( 'fit with s',pf[0],pe,res,extras ))
pprint ( report(xx,yy,pf,s=sss) )
(pf,res,pe,extras) = gen_polyfit(xx,yy,4,s=sss[...,0],errors=ER)
pprint (( 'fit with s uniform',p,pe,res,extras ))
pprint ( report(xx,yy,pf,s=sss[...,0]) )
(pf,res,pe,extras) = gen_polyfit(xx,yy,2,s=sss,errors=ER)
pprint (( 'fit with s linear(y=mx+b)',p,pe,res,extras ))
rep=report(xx,yy,pf,s=sss)
pprint ( rep )
pprint ( 1-rep['rstar']**2 )
figure(2)
legend()
ylim(5,15)
figure(3)
legend()
ylim(2,10)
figure(4)
legend()
ylim(7,17)
show()
| lupien/pyHegel | pyHegel/gen_poly.py | Python | lgpl-3.0 | 30,296 |
import os
import errno
def delete_file(file_name, dry=False):
if dry:
print(' DRY DELETED: {}'.format(file_name))
else:
os.remove(file_name)
try:
dirname = os.path.dirname(file_name)
os.rmdir(dirname)
print(' DELETED DIR: {}'.format(dirname))
except OSError as ex:
if ex.errno != errno.ENOTEMPTY:
raise
print(' DELETED: {}'.format(file_name))
def run_dircmpdel(dircmp_file, prompt=True, dry=False):
"""
Parse dircmp file for groups of file names to be deleted.
"""
with open(dircmp_file) as fp:
lines = fp.read()
groups = lines.strip().split('\n\n')
print('Found {} duplicate groups'.format(len(groups)))
groups = (group.split('\n') for group in groups)
checked_proper_cwd = False
for group in groups:
for i, file_name in enumerate(group):
if not i:
if not checked_proper_cwd:
if not os.path.exists(file_name):
raise RuntimeError('File {} could not be found. '
'Please ensure you are in the '
'correct directory.'
''.format(file_name))
checked_proper_cwd = True
print('Deleting duplicates of {}'.format(file_name))
else:
if prompt:
while True:
resp = input(' Delete {}? '.format(file_name))
resp = resp.lower()
if resp not in ('yes', 'no'):
print('Please answer "yes" or "no".')
elif resp == 'yes':
delete_file(file_name, dry=dry)
break
elif resp == 'no':
print(' Not deleted: {}'.format(file_name))
break
else:
delete_file(file_name, dry=dry)
print()
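# Expected input format (inferred from the parsing above; shown only as an
# illustration): groups of duplicate paths separated by blank lines, where
# the first path of each group is the copy that is kept, e.g.
#
#     photos/a.jpg
#     backup/a.jpg
#
#     docs/b.txt
#     old/b.txt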
if __name__ == '__main__':
import argparse
parser = argparse.ArgumentParser(
description='Utility for deleting duplicate files found by dircmp'
)
parser.add_argument('file')
parser.add_argument('--no-prompt',
action='store_false', default=True, dest='prompt')
parser.add_argument('-d', '--dry',
action='store_true', default=False, dest='dry')
args = parser.parse_args()
run_dircmpdel(args.file, prompt=args.prompt, dry=args.dry)
| logston/python-dircmp | dircmppy/dircmpdel.py | Python | bsd-2-clause | 2,648 |
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import tensorflow as tf
from differential_privacy.multiple_teachers import aggregation
from differential_privacy.multiple_teachers import deep_cnn
from differential_privacy.multiple_teachers import input
from differential_privacy.multiple_teachers import metrics
FLAGS = tf.flags.FLAGS
tf.flags.DEFINE_string('dataset', 'svhn', 'The name of the dataset to use')
tf.flags.DEFINE_integer('nb_labels', 10, 'Number of output classes')
tf.flags.DEFINE_string('data_dir','/tmp','Temporary storage')
tf.flags.DEFINE_string('train_dir','/tmp/train_dir','Where model chkpt are saved')
tf.flags.DEFINE_string('teachers_dir','/tmp/train_dir',
'Directory where teachers checkpoints are stored.')
tf.flags.DEFINE_integer('teachers_max_steps', 3000,
                        'Number of steps teachers were run.')
tf.flags.DEFINE_integer('max_steps', 3000, 'Number of steps to run student.')
tf.flags.DEFINE_integer('nb_teachers', 10, 'Teachers in the ensemble.')
tf.flags.DEFINE_integer('stdnt_share', 1000,
'Student share (last index) of the test data')
tf.flags.DEFINE_integer('lap_scale', 10,
'Scale of the Laplacian noise added for privacy')
tf.flags.DEFINE_boolean('save_labels', False,
'Dump numpy arrays of labels and clean teacher votes')
tf.flags.DEFINE_boolean('deeper', False, 'Activate deeper CNN model')
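# Example invocation (hypothetical flag values; the flags themselves are
# defined above):
#   python train_student.py --dataset=mnist --nb_teachers=100 \
#     --teachers_dir=/tmp/train_dir --lap_scale=20 --stdnt_share=5000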
def ensemble_preds(dataset, nb_teachers, stdnt_data):
"""
Given a dataset, a number of teachers, and some input data, this helper
function queries each teacher for predictions on the data and returns
  all predictions in a single array. (These can then be aggregated into
  a single prediction per input using aggregation.py; cf. function
  prepare_student_data() below.)
:param dataset: string corresponding to mnist, cifar10, or svhn
:param nb_teachers: number of teachers (in the ensemble) to learn from
:param stdnt_data: unlabeled student training data
:return: 3d array (teacher id, sample id, probability per class)
"""
# Compute shape of array that will hold probabilities produced by each
# teacher, for each training point, and each output class
result_shape = (nb_teachers, len(stdnt_data), FLAGS.nb_labels)
# Create array that will hold result
result = np.zeros(result_shape, dtype=np.float32)
# Get predictions from each teacher
for teacher_id in xrange(nb_teachers):
# Compute path of checkpoint file for teacher model with ID teacher_id
if FLAGS.deeper:
ckpt_path = FLAGS.teachers_dir + '/' + str(dataset) + '_' + str(nb_teachers) + '_teachers_' + str(teacher_id) + '_deep.ckpt-' + str(FLAGS.teachers_max_steps - 1) #NOLINT(long-line)
else:
ckpt_path = FLAGS.teachers_dir + '/' + str(dataset) + '_' + str(nb_teachers) + '_teachers_' + str(teacher_id) + '.ckpt-' + str(FLAGS.teachers_max_steps - 1) # NOLINT(long-line)
# Get predictions on our training data and store in result array
result[teacher_id] = deep_cnn.softmax_preds(stdnt_data, ckpt_path)
# This can take a while when there are a lot of teachers so output status
print("Computed Teacher " + str(teacher_id) + " softmax predictions")
return result
def prepare_student_data(dataset, nb_teachers, save=False):
"""
Takes a dataset name and the size of the teacher ensemble and prepares
training data for the student model, according to parameters indicated
in flags above.
:param dataset: string corresponding to mnist, cifar10, or svhn
:param nb_teachers: number of teachers (in the ensemble) to learn from
:param save: if set to True, will dump student training labels predicted by
the ensemble of teachers (with Laplacian noise) as npy files.
It also dumps the clean votes for each class (without noise) and
the labels assigned by teachers
:return: pairs of (data, labels) to be used for student training and testing
"""
assert input.create_dir_if_needed(FLAGS.train_dir)
# Load the dataset
if dataset == 'svhn':
test_data, test_labels = input.ld_svhn(test_only=True)
elif dataset == 'cifar10':
test_data, test_labels = input.ld_cifar10(test_only=True)
elif dataset == 'mnist':
test_data, test_labels = input.ld_mnist(test_only=True)
else:
print("Check value of dataset flag")
return False
# Make sure there is data leftover to be used as a test set
assert FLAGS.stdnt_share < len(test_data)
# Prepare [unlabeled] student training data (subset of test set)
stdnt_data = test_data[:FLAGS.stdnt_share]
# Compute teacher predictions for student training data
teachers_preds = ensemble_preds(dataset, nb_teachers, stdnt_data)
# Aggregate teacher predictions to get student training labels
if not save:
stdnt_labels = aggregation.noisy_max(teachers_preds, FLAGS.lap_scale)
else:
# Request clean votes and clean labels as well
stdnt_labels, clean_votes, labels_for_dump = aggregation.noisy_max(teachers_preds, FLAGS.lap_scale, return_clean_votes=True) #NOLINT(long-line)
# Prepare filepath for numpy dump of clean votes
filepath = FLAGS.data_dir + "/" + str(dataset) + '_' + str(nb_teachers) + '_student_clean_votes_lap_' + str(FLAGS.lap_scale) + '.npy' # NOLINT(long-line)
# Prepare filepath for numpy dump of clean labels
filepath_labels = FLAGS.data_dir + "/" + str(dataset) + '_' + str(nb_teachers) + '_teachers_labels_lap_' + str(FLAGS.lap_scale) + '.npy' # NOLINT(long-line)
# Dump clean_votes array
with tf.gfile.Open(filepath, mode='w') as file_obj:
np.save(file_obj, clean_votes)
# Dump labels_for_dump array
with tf.gfile.Open(filepath_labels, mode='w') as file_obj:
np.save(file_obj, labels_for_dump)
# Print accuracy of aggregated labels
ac_ag_labels = metrics.accuracy(stdnt_labels, test_labels[:FLAGS.stdnt_share])
print("Accuracy of the aggregated labels: " + str(ac_ag_labels))
# Store unused part of test set for use as a test set after student training
stdnt_test_data = test_data[FLAGS.stdnt_share:]
stdnt_test_labels = test_labels[FLAGS.stdnt_share:]
if save:
# Prepare filepath for numpy dump of labels produced by noisy aggregation
filepath = FLAGS.data_dir + "/" + str(dataset) + '_' + str(nb_teachers) + '_student_labels_lap_' + str(FLAGS.lap_scale) + '.npy' #NOLINT(long-line)
# Dump student noisy labels array
with tf.gfile.Open(filepath, mode='w') as file_obj:
np.save(file_obj, stdnt_labels)
return stdnt_data, stdnt_labels, stdnt_test_data, stdnt_test_labels
def train_student(dataset, nb_teachers):
"""
This function trains a student using predictions made by an ensemble of
teachers. The student and teacher models are trained using the same
neural network architecture.
:param dataset: string corresponding to mnist, cifar10, or svhn
:param nb_teachers: number of teachers (in the ensemble) to learn from
:return: True if student training went well
"""
assert input.create_dir_if_needed(FLAGS.train_dir)
# Call helper function to prepare student data using teacher predictions
stdnt_dataset = prepare_student_data(dataset, nb_teachers, save=True)
# Unpack the student dataset
stdnt_data, stdnt_labels, stdnt_test_data, stdnt_test_labels = stdnt_dataset
# Prepare checkpoint filename and path
if FLAGS.deeper:
ckpt_path = FLAGS.train_dir + '/' + str(dataset) + '_' + str(nb_teachers) + '_student_deeper.ckpt' #NOLINT(long-line)
else:
ckpt_path = FLAGS.train_dir + '/' + str(dataset) + '_' + str(nb_teachers) + '_student.ckpt' # NOLINT(long-line)
# Start student training
assert deep_cnn.train(stdnt_data, stdnt_labels, ckpt_path)
# Compute final checkpoint name for student (with max number of steps)
ckpt_path_final = ckpt_path + '-' + str(FLAGS.max_steps - 1)
# Compute student label predictions on remaining chunk of test set
student_preds = deep_cnn.softmax_preds(stdnt_test_data, ckpt_path_final)
  # Compute student accuracy
precision = metrics.accuracy(student_preds, stdnt_test_labels)
print('Precision of student after training: ' + str(precision))
return True
def main(argv=None): # pylint: disable=unused-argument
# Run student training according to values specified in flags
assert train_student(FLAGS.dataset, FLAGS.nb_teachers)
if __name__ == '__main__':
tf.app.run()
| unnikrishnankgs/va | venv/lib/python3.5/site-packages/tensorflow/models/differential_privacy/multiple_teachers/train_student.py | Python | bsd-2-clause | 9,187 |
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Default TEST_CONFIG_OVERRIDE for python repos.
# You can copy this file into your directory, then it will be imported from
# the noxfile.py.
# The source of truth:
# https://github.com/GoogleCloudPlatform/python-docs-samples/blob/master/noxfile_config.py
TEST_CONFIG_OVERRIDE = {
# You can opt out from the test for specific Python versions.
"ignored_versions": ["2.7"],
# Old samples are opted out of enforcing Python type hints
# All new samples should feature them
"enforce_type_hints": True,
# An envvar key for determining the project id to use. Change it
# to 'BUILD_SPECIFIC_GCLOUD_PROJECT' if you want to opt in using a
# build specific Cloud project. You can also use your own string
# to use your own Cloud project.
"gcloud_project_env": "GOOGLE_CLOUD_PROJECT",
# 'gcloud_project_env': 'BUILD_SPECIFIC_GCLOUD_PROJECT',
# A dictionary you want to inject into your test. Don't put any
# secrets here. These values will override predefined values.
"envs": {},
}
| googleapis/python-bigquery-reservation | samples/snippets/noxfile_config.py | Python | apache-2.0 | 1,606 |
from flask import Blueprint, g, url_for
from ..errors import ValidationError, bad_request, not_found
from ..auth import auth
from ..decorators import rate_limit, hub_active
api = Blueprint('api', __name__)
def get_catalog():
return {
'hub': url_for('api.get_hub', _external=True),
'endpoints': url_for('api.get_endpoints', _external=True),
'endpoints_group': url_for('api.get_groups', _external=True),
'endpoints_status': url_for('api.get_endpointstatuses', _external=True),
'endpoints_types': url_for('api.get_endpoint_types', _external=True),
'users':url_for('api.get_users', _external=True)
}
@api.errorhandler(ValidationError)
def validation_error(e):
return bad_request(str(e))
@api.errorhandler(400)
def bad_request_error(e):
return bad_request('invalid request')
@api.before_request
@hub_active
@auth.login_required
@rate_limit(limit=5, period=15)
def before_request():
pass
@api.after_request
def after_request(response):
if hasattr(g, 'headers'):
response.headers.extend(g.headers)
return response
# do this last to avoid circular dependencies
from . import hub_conf,endpoint_conf,users,operate,endpoint_status,endpoint_types,interfaces,endpoint_group,properties,schedule#,students,registrations,classes | punitvanjani/test1 | api/v1/__init__.py | Python | mit | 1,341 |
from rackattack import api
class Node(api.Node):
def __init__(self, ipcClient, allocation, name, info):
assert 'id' in info
assert 'primaryMACAddress' in info
assert 'secondaryMACAddress' in info
assert 'ipAddress' in info
self._ipcClient = ipcClient
self._allocation = allocation
self._name = name
self._info = info
self._id = info['id']
def rootSSHCredentials(self):
return self._ipcClient.call(
"node__rootSSHCredentials", allocationID=self._allocation._idForNodeIPC(), nodeID=self._id)
def id(self):
return self._id
def name(self):
return self._name
def primaryMACAddress(self):
return self._info['primaryMACAddress']
def secondaryMACAddress(self):
return self._info['secondaryMACAddress']
def NICBondings(self):
return self._info.get('NICBondings', None)
def getOtherMACAddresses(self):
return self._info.get("otherMACAddresses", None)
def getMacAddress(self, macName):
return self._info[macName]
def ipAddress(self):
return self._info['ipAddress']
def coldRestart(self):
return self._ipcClient.call(
'node__coldRestart', allocationID=self._allocation._idForNodeIPC(), nodeID=self._id)
def fetchSerialLog(self):
connection = self._ipcClient.urlopen("/host/%s/serialLog" % self._id)
try:
return connection.read()
finally:
connection.close()
def networkInfo(self):
return self._info
def answerDHCP(self, shouldAnswer):
return self._ipcClient.call(
'node__answerDHCP', allocationID=self._allocation._idForNodeIPC(),
nodeID=self._id, shouldAnswer=shouldAnswer)
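# Usage sketch (hypothetical; Node objects are normally obtained from an
# allocation rather than constructed directly):
#   node = allocation.nodes()['node0']   # 'nodes' accessor name is assumed
#   credentials = node.rootSSHCredentials()
#   ip = node.ipAddress()
#   node.coldRestart()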
| Stratoscale/rackattack-api | py/rackattack/tcp/node.py | Python | apache-2.0 | 1,797 |
#!/usr/bin/env python
"""Execute the tests for the pair_align program.
The golden test outputs are generated by the script generate_outputs.sh.
You have to give the root paths to the source and the binaries as arguments to
the program. These are the paths to the directory that contains the 'projects'
directory.
Usage: run_tests.py SOURCE_ROOT_PATH BINARY_ROOT_PATH
"""
import logging
import os.path
import sys
# Automagically add util/py_lib to PYTHONPATH environment variable.
path = os.path.abspath(os.path.join(os.path.dirname(__file__), '..', '..',
'..', '..', 'util', 'py_lib'))
sys.path.insert(0, path)
import seqan.app_tests as app_tests
def main(source_base, binary_base):
"""Main entry point of the script."""
print 'Executing test for razers'
print '========================='
print
ph = app_tests.TestPathHelper(
source_base, binary_base,
'core/apps/razers/tests') # tests dir
# ============================================================
# Auto-detect the binary path.
# ============================================================
path_to_program = app_tests.autolocateBinary(
binary_base, 'core/apps/razers', 'razers')
# ============================================================
    # Build TestConf list.
# ============================================================
    # Build list with TestConf objects, analogously to how the output
# was generated in generate_outputs.sh.
conf_list = []
# ============================================================
# Run Adeno Single-End Tests
# ============================================================
# We run the following for all read lengths we have reads for.
for rl in [36, 100]:
# Run with default options.
conf = app_tests.TestConf(
program=path_to_program,
redir_stdout=ph.outFile('se-adeno-reads%d_1.stdout' % rl),
args=['--low-memory',
ph.inFile('adeno-genome.fa'),
ph.inFile('adeno-reads%d_1.fa' % rl,),
'-o', ph.outFile('se-adeno-reads%d_1.razers' % rl)],
to_diff=[(ph.inFile('se-adeno-reads%d_1.razers' % rl),
ph.outFile('se-adeno-reads%d_1.razers' % rl)),
(ph.inFile('se-adeno-reads%d_1.stdout' % rl),
ph.outFile('se-adeno-reads%d_1.stdout' % rl))])
conf_list.append(conf)
# Allow indels.
conf = app_tests.TestConf(
program=path_to_program,
redir_stdout=ph.outFile('se-adeno-reads%d_1-id.stdout' % rl),
args=['--low-memory',
'-id',
ph.inFile('adeno-genome.fa'),
ph.inFile('adeno-reads%d_1.fa' % rl),
'-o', ph.outFile('se-adeno-reads%d_1-id.razers' % rl)],
to_diff=[(ph.inFile('se-adeno-reads%d_1-id.razers' % rl),
ph.outFile('se-adeno-reads%d_1-id.razers' % rl)),
(ph.inFile('se-adeno-reads%d_1-id.stdout' % rl),
ph.outFile('se-adeno-reads%d_1-id.stdout' % rl))])
conf_list.append(conf)
# Compute forward/reverse matches only.
for o in ['-r', '-f']:
conf = app_tests.TestConf(
program=path_to_program,
redir_stdout=ph.outFile('se-adeno-reads%d_1-id%s.stdout' % (rl, o)),
args=['--low-memory',
'-id', o,
ph.inFile('adeno-genome.fa'),
ph.inFile('adeno-reads%d_1.fa' % rl),
'-o', ph.outFile('se-adeno-reads%d_1-id%s.razers' % (rl, o))],
to_diff=[(ph.inFile('se-adeno-reads%d_1-id%s.razers' % (rl, o)),
ph.outFile('se-adeno-reads%d_1-id%s.razers' % (rl, o))),
(ph.inFile('se-adeno-reads%d_1-id%s.stdout' % (rl, o)),
ph.outFile('se-adeno-reads%d_1-id%s.stdout' % (rl, o)))])
conf_list.append(conf)
# Compute with different identity rates.
for i in range(90, 101):
conf = app_tests.TestConf(
program=path_to_program,
redir_stdout=ph.outFile('se-adeno-reads%d_1-id-i%d.stdout' % (rl, i)),
args=['--low-memory',
'-id', '-i', str(i),
ph.inFile('adeno-genome.fa'),
ph.inFile('adeno-reads%d_1.fa' % rl),
'-o', ph.outFile('se-adeno-reads%d_1-id-i%d.razers' % (rl, i))],
to_diff=[(ph.inFile('se-adeno-reads%d_1-id-i%d.razers' % (rl, i)),
ph.outFile('se-adeno-reads%d_1-id-i%d.razers' % (rl, i))),
(ph.inFile('se-adeno-reads%d_1-id-i%d.stdout' % (rl, i)),
ph.outFile('se-adeno-reads%d_1-id-i%d.stdout' % (rl, i)))])
conf_list.append(conf)
# Compute with different output formats.
for of, suffix in enumerate(['razers', 'fa', 'eland', 'gff']):
conf = app_tests.TestConf(
program=path_to_program,
redir_stdout=ph.outFile('se-adeno-reads%d_1-id-of%d.stdout' % (rl, of)),
args=['--low-memory',
'-id',
ph.inFile('adeno-genome.fa'),
ph.inFile('adeno-reads%d_1.fa' % rl),
'-o', ph.outFile('se-adeno-reads%d_1-id-of%d.%s' % (rl, of, suffix))],
to_diff=[(ph.inFile('se-adeno-reads%d_1-id-of%d.%s' % (rl, of, suffix)),
ph.outFile('se-adeno-reads%d_1-id-of%d.%s' % (rl, of, suffix))),
(ph.inFile('se-adeno-reads%d_1-id-of%d.stdout' % (rl, of)),
ph.outFile('se-adeno-reads%d_1-id-of%d.stdout' % (rl, of)))])
conf_list.append(conf)
# Compute with different sort orders.
for so in [0, 1]:
conf = app_tests.TestConf(
program=path_to_program,
redir_stdout=ph.outFile('se-adeno-reads%d_1-id-so%d.stdout' % (rl, so)),
args=['--low-memory',
'-id', '-so', str(so),
ph.inFile('adeno-genome.fa'),
ph.inFile('adeno-reads%d_1.fa' % rl),
'-o', ph.outFile('se-adeno-reads%d_1-id-so%d.razers' % (rl, so))],
to_diff=[(ph.inFile('se-adeno-reads%d_1-id-so%d.razers' % (rl, so)),
ph.outFile('se-adeno-reads%d_1-id-so%d.razers' % (rl, so))),
(ph.inFile('se-adeno-reads%d_1-id-so%d.stdout' % (rl, so)),
ph.outFile('se-adeno-reads%d_1-id-so%d.stdout' % (rl, so)))])
conf_list.append(conf)
# ============================================================
# Run Adeno Paired-End Tests
# ============================================================
# We run the following for all read lengths we have reads for.
for rl in [36, 100]:
# Run with default options.
conf = app_tests.TestConf(
program=path_to_program,
redir_stdout=ph.outFile('pe-adeno-reads%d_2.stdout' % rl),
args=['--low-memory',
ph.inFile('adeno-genome.fa'),
ph.inFile('adeno-reads%d_1.fa' % rl),
ph.inFile('adeno-reads%d_2.fa' % rl),
'-o', ph.outFile('pe-adeno-reads%d_2.razers' % rl)],
to_diff=[(ph.inFile('pe-adeno-reads%d_2.razers' % rl),
ph.outFile('pe-adeno-reads%d_2.razers' % rl)),
(ph.inFile('pe-adeno-reads%d_2.stdout' % rl),
ph.outFile('pe-adeno-reads%d_2.stdout' % rl))])
conf_list.append(conf)
# Allow indels.
conf = app_tests.TestConf(
program=path_to_program,
redir_stdout=ph.outFile('pe-adeno-reads%d_2-id.stdout' % rl),
args=['--low-memory',
'-id',
ph.inFile('adeno-genome.fa'),
ph.inFile('adeno-reads%d_1.fa' % rl),
ph.inFile('adeno-reads%d_2.fa' % rl),
'-o', ph.outFile('pe-adeno-reads%d_2-id.razers' % rl)],
to_diff=[(ph.inFile('pe-adeno-reads%d_2-id.razers' % rl),
ph.outFile('pe-adeno-reads%d_2-id.razers' % rl)),
(ph.inFile('pe-adeno-reads%d_2-id.stdout' % rl),
ph.outFile('pe-adeno-reads%d_2-id.stdout' % rl))])
conf_list.append(conf)
# Compute forward/reverse matches only.
for o in ['-r', '-f']:
conf = app_tests.TestConf(
program=path_to_program,
redir_stdout=ph.outFile('pe-adeno-reads%d_2-id%s.stdout' % (rl, o)),
args=['--low-memory',
'-id', o,
ph.inFile('adeno-genome.fa'),
ph.inFile('adeno-reads%d_1.fa' % rl),
ph.inFile('adeno-reads%d_2.fa' % rl),
'-o', ph.outFile('pe-adeno-reads%d_2-id%s.razers' % (rl, o))],
to_diff=[(ph.inFile('pe-adeno-reads%d_2-id%s.razers' % (rl, o)),
ph.outFile('pe-adeno-reads%d_2-id%s.razers' % (rl, o))),
(ph.inFile('pe-adeno-reads%d_2-id%s.stdout' % (rl, o)),
ph.outFile('pe-adeno-reads%d_2-id%s.stdout' % (rl, o)))])
conf_list.append(conf)
# Compute with different identity rates.
for i in range(90, 101):
conf = app_tests.TestConf(
program=path_to_program,
redir_stdout=ph.outFile('pe-adeno-reads%d_2-id-i%d.stdout' % (rl, i)),
args=['--low-memory',
'-id', '-i', str(i),
ph.inFile('adeno-genome.fa'),
ph.inFile('adeno-reads%d_1.fa' % rl),
ph.inFile('adeno-reads%d_2.fa' % rl),
'-o', ph.outFile('pe-adeno-reads%d_2-id-i%d.razers' % (rl, i))],
to_diff=[(ph.inFile('pe-adeno-reads%d_2-id-i%d.razers' % (rl, i)),
ph.outFile('pe-adeno-reads%d_2-id-i%d.razers' % (rl, i))),
(ph.inFile('pe-adeno-reads%d_2-id-i%d.stdout' % (rl, i)),
ph.outFile('pe-adeno-reads%d_2-id-i%d.stdout' % (rl, i)))])
conf_list.append(conf)
# Compute with different output formats.
for of, suffix in enumerate(['razers', 'fa', 'eland', 'gff']):
conf = app_tests.TestConf(
program=path_to_program,
redir_stdout=ph.outFile('pe-adeno-reads%d_2-id-of%d.stdout' % (rl, of)),
args=['--low-memory',
'-id',
ph.inFile('adeno-genome.fa'),
ph.inFile('adeno-reads%d_1.fa' % rl),
ph.inFile('adeno-reads%d_2.fa' % rl),
'-o', ph.outFile('pe-adeno-reads%d_2-id-of%d.%s' % (rl, of, suffix))],
to_diff=[(ph.inFile('pe-adeno-reads%d_2-id-of%d.%s' % (rl, of, suffix)),
ph.outFile('pe-adeno-reads%d_2-id-of%d.%s' % (rl, of, suffix))),
(ph.inFile('pe-adeno-reads%d_2-id-of%d.stdout' % (rl, of)),
ph.outFile('pe-adeno-reads%d_2-id-of%d.stdout' % (rl, of)))])
conf_list.append(conf)
# Compute with different sort orders.
for so in [0, 1]:
conf = app_tests.TestConf(
program=path_to_program,
redir_stdout=ph.outFile('pe-adeno-reads%d_2-id-so%d.stdout' % (rl, so)),
args=['--low-memory',
'-id', '-so', str(so),
ph.inFile('adeno-genome.fa'),
ph.inFile('adeno-reads%d_1.fa' % rl),
ph.inFile('adeno-reads%d_2.fa' % rl),
'-o', ph.outFile('pe-adeno-reads%d_2-id-so%d.razers' % (rl, so))],
to_diff=[(ph.inFile('pe-adeno-reads%d_2-id-so%d.razers' % (rl, so)),
ph.outFile('pe-adeno-reads%d_2-id-so%d.razers' % (rl, so))),
(ph.inFile('pe-adeno-reads%d_2-id-so%d.stdout' % (rl, so)),
ph.outFile('pe-adeno-reads%d_2-id-so%d.stdout' % (rl, so)))])
conf_list.append(conf)
# Execute the tests.
failures = 0
for conf in conf_list:
res = app_tests.runTest(conf)
# Output to the user.
print ' '.join(['razers'] + conf.args),
if res:
print 'OK'
else:
failures += 1
print 'FAILED'
# Cleanup.
ph.deleteTempDir()
print '=============================='
print ' total tests: %d' % len(conf_list)
print ' failed tests: %d' % failures
print 'successful tests: %d' % (len(conf_list) - failures)
print '=============================='
    # Compute and return the exit code.
return failures != 0
if __name__ == '__main__':
sys.exit(app_tests.main(main))
| bkahlert/seqan-research | raw/workshop13/workshop2013-data-20130926/trunk/core/apps/razers/tests/run_tests.py | Python | mit | 13,292 |
from django.test import TestCase
from lists.forms import EMPTY_LIST_ERROR, ItemForm
from lists.models import Item, List
class ItemFormTest(TestCase):
def test_form_item_input_has_placeholder_and_css_classes(self):
form = ItemForm()
self.assertIn('placeholder="Enter a to-do item"', form.as_p())
self.assertIn('class="form-control input-lg"', form.as_p())
def test_form_validation_for_blank_items(self):
form = ItemForm(data={'text': ''})
self.assertFalse(form.is_valid())
self.assertEqual(form.errors['text'], [EMPTY_LIST_ERROR])
def test_form_save_handles_saving_to_a_list(self):
list_ = List.objects.create()
form = ItemForm(data={'text': 'do me'})
new_item = form.save(for_list=list_)
self.assertEqual(new_item, Item.objects.first())
self.assertEqual(new_item.text, 'do me')
self.assertEqual(new_item.list, list_) | MilesDuronCIMAT/book_exercises | chapter_11/lists/tests/test_forms.py | Python | mit | 906 |
import getpass
import sys
import traceback
import paramiko
import interactive
import auth
import util
import json
import requests
class Executer(object):
def __init__(self, name, username, namespace, exec_endpoint='exec.alauda.cn', verbose=False):
self.name = name if username == namespace else '{}/{}'.format(namespace, name)
self.namespace = namespace
self.username = username
self.exec_endpoint = exec_endpoint
self.port = 4022
self.client = None
self.chan = None
self.verbose = verbose
def connect(self):
if self.verbose:
print('*** Connecting...')
# connect to the exec_endpoint
self.client = paramiko.SSHClient()
self.client.load_system_host_keys()
self.client.set_missing_host_key_policy(paramiko.AutoAddPolicy())
password = getpass.getpass('Password for %s@%s: ' % (self.username, self.exec_endpoint))
self.client.connect(self.exec_endpoint,
self.port,
username=self.username,
password=password,
allow_agent=False,
look_for_keys=False)
if self.verbose:
print(repr(self.client.get_transport()))
def execute(self, command, *args):
try:
self.connect()
transport = self.client.get_transport()
self.chan = transport.open_session()
self.chan.get_pty()
self.chan.exec_command('{} {} {}'.format(self.name, command, ' '.join(args)))
interactive.interactive_shell(self.chan)
self.close()
except Exception as e:
print('*** Caught exception: %s: %s' % (e.__class__, e))
traceback.print_exc()
try:
self.close()
            except Exception:
pass
sys.exit(1)
def close(self):
if self.chan:
self.chan.close()
self.client.close()
@classmethod
def fetch(cls, name, namespace=None):
service_name = name.split(".")[0]
api_endpoint, token, username = auth.load_token()
url = api_endpoint + 'services/{}/'.format(namespace or username) + service_name
headers = auth.build_headers(token)
r = requests.get(url, headers=headers)
util.check_response(r)
data = json.loads(r.text)
# print r.text
executer = cls(name=name,
username=username,
exec_endpoint=data['exec_endpoint'],
namespace=data['namespace'])
return executer
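# A hypothetical usage sketch (not part of the original module): 'web-1' is a
# made-up service name, and auth.load_token() must already hold credentials
# from a prior login for fetch() to succeed.
#
# executer = Executer.fetch('web-1')
# executer.execute('bash')  # opens an interactive shell on the service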
| Lupino/alauda-CLI | alaudacli/execute.py | Python | apache-2.0 | 2,679 |
import os
import sys
from setuptools import setup
if sys.version_info < (3, 2):
print("Sorry, djangocms-lab-members currently requires Python 3.2+.")
sys.exit(1)
# From: https://hynek.me/articles/sharing-your-labor-of-love-pypi-quick-and-dirty/
def read(*paths):
"""Build a file path from *paths* and return the contents."""
with open(os.path.join(*paths), 'r') as f:
return f.read()
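# For example, the long_description below is built from
# read('README.rst') + read('CHANGELOG.rst'); since the segments go through
# os.path.join(), read('docs', 'intro.rst') (a hypothetical path) works too.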
# allow setup.py to be run from any path
os.chdir(os.path.normpath(os.path.join(os.path.abspath(__file__), os.pardir)))
install_requires = [
'Django>=1.7',
'django-cms>=3.0.7,<3.2',
'django-lab-members>=0.3.0',
'djangocms-lab-publications>=0.1.3',
]
setup(
name='djangocms-lab-members',
version='0.2.0',
packages=['cms_lab_members'],
include_package_data=True,
license='BSD License',
description='A Django app to extend django-lab-members with django CMS-specific features',
long_description=(read('README.rst') + '\n\n' +
read('CHANGELOG.rst')),
url='https://github.com/mfcovington/djangocms-lab-members',
author='Michael F. Covington',
author_email='mfcovington@gmail.com',
classifiers=[
'Development Status :: 3 - Alpha',
'Environment :: Web Environment',
'Framework :: Django',
'Framework :: Django :: 1.7',
'Framework :: Django :: 1.8',
'Intended Audience :: Developers',
'Intended Audience :: Science/Research',
'License :: OSI Approved :: BSD License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.2',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3 :: Only',
'Topic :: Internet :: WWW/HTTP',
'Topic :: Internet :: WWW/HTTP :: Dynamic Content',
],
install_requires=install_requires,
)
| mfcovington/djangocms-lab-members | setup.py | Python | bsd-3-clause | 2,005 |
# Addresses a bug in the way Python 3.5+ handles
# creation of map constants
opts = {'highlight': True,
'start_line': -1,
'end_line': None
}
print(opts)
| moagstar/python-uncompyle6 | test/simple_source/expression/05_const_map.py | Python | mit | 189 |
# -*- coding: utf-8 -*-
#
# Molecular Blender
# Filename: util.py
# Copyright (C) 2017 Shane Parker, Joshua Szekely
#
# This file is part of Molecular Blender.
#
# Molecular Blender is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# Molecular Blender is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Library General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Molecular Blender; see COPYING.
# If not, see <http://www.gnu.org/licenses/>.
#
"""Common utilities for Molecular Blender"""
import os
import time
DEBUG = os.environ.get('MB_DEBUG', '') == '1'
class Timer(object):
"""Convenience class for measuring timing of chunks of code"""
def __init__(self):
"""Start a new timer"""
self.last_time = time.time()
def tick(self):
"""Set a new reference time and return the time since the last tick"""
lt, self.last_time = self.last_time, time.time()
return self.last_time - lt
def tick_print(self, label):
"""Calls tick and automatically prints the output with the given label"""
out = self.tick()
if DEBUG:
print(" %40s: %.4f sec" % (label, out))
return out
def stopwatch(routine, verbose=DEBUG):
"""Decorator to measure time in a function using blender timer"""
def stopwatch_dec(func):
"""Specific decorator"""
def wrapper(*args, **kwargs):
"""Wrapper for decorator to measure timer"""
start = time.time()
out = func(*args, **kwargs)
end = time.time()
if verbose:
print("%.4f sec elapsed in routine %s" %
((end - start), routine))
return out
return wrapper
return stopwatch_dec
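# A minimal usage sketch (hypothetical function, not from the original file).
# The elapsed time is only printed when the MB_DEBUG environment variable is
# set to '1', because verbose defaults to DEBUG:
#
# @stopwatch("import molecule")
# def import_molecule():
#     ...  # expensive work timed by the wrapper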
def unique_name(name, existing_names, starting_suffix=None):
"""If name is not in existing_names, returns name. Otherwise, returns name + "0", 1, 2, etc."""
testname = name if starting_suffix is None else "%s%d" % (
name, starting_suffix)
if testname in existing_names:
i = 0 if starting_suffix is None else starting_suffix + 1
while True:
testname = "%s%d" % (name, i)
if testname in existing_names:
i += 1
else:
return testname
else:
return testname
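# For example: unique_name("Carbon", {"Carbon", "Carbon0"}) returns "Carbon1",
# while unique_name("Carbon", set()) returns "Carbon" unchanged.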
| smparker/Molecular-Blender | util.py | Python | gpl-3.0 | 2,699 |
# Authors:
# Trevor Perrin
# Martin von Loewis - python 3 port
# Yngve Pettersen (ported by Paul Sokolovsky) - TLS 1.2
#
# See the LICENSE file for legal information regarding use of this file.
"""cryptomath module
This module has basic math/crypto code."""
from __future__ import print_function
import os
import math
import base64
import binascii
from .compat import *
# **************************************************************************
# Load Optional Modules
# **************************************************************************
# Try to load M2Crypto/OpenSSL
try:
from M2Crypto import m2
m2cryptoLoaded = True
except ImportError:
m2cryptoLoaded = False
#Try to load GMPY
try:
import gmpy
gmpyLoaded = True
except ImportError:
gmpyLoaded = False
#Try to load pycrypto
try:
import Crypto.Cipher.AES
pycryptoLoaded = True
except ImportError:
pycryptoLoaded = False
# **************************************************************************
# PRNG Functions
# **************************************************************************
# Check that os.urandom works
import zlib
length = len(zlib.compress(os.urandom(1000)))
assert(length > 900)
def getRandomBytes(howMany):
b = bytearray(os.urandom(howMany))
assert(len(b) == howMany)
return b
prngName = "os.urandom"
# **************************************************************************
# Simple hash functions
# **************************************************************************
import hmac
import hashlib
def MD5(b):
return bytearray(hashlib.md5(compat26Str(b)).digest())
def SHA1(b):
return bytearray(hashlib.sha1(compat26Str(b)).digest())
def SHA256(b):
return bytearray(hashlib.sha256(compat26Str(b)).digest())
def HMAC_MD5(k, b):
k = compatHMAC(k)
b = compatHMAC(b)
return bytearray(hmac.new(k, b, hashlib.md5).digest())
def HMAC_SHA1(k, b):
k = compatHMAC(k)
b = compatHMAC(b)
return bytearray(hmac.new(k, b, hashlib.sha1).digest())
def HMAC_SHA256(k, b):
k = compatHMAC(k)
b = compatHMAC(b)
return bytearray(hmac.new(k, b, hashlib.sha256).digest())
# **************************************************************************
# Converter Functions
# **************************************************************************
def bytesToNumber(b):
total = 0
multiplier = 1
for count in range(len(b)-1, -1, -1):
byte = b[count]
total += multiplier * byte
multiplier *= 256
return total
def numberToByteArray(n, howManyBytes=None):
"""Convert an integer into a bytearray, zero-pad to howManyBytes.
The returned bytearray may be smaller than howManyBytes, but will
not be larger. The returned bytearray will contain a big-endian
encoding of the input integer (n).
"""
if howManyBytes == None:
howManyBytes = numBytes(n)
b = bytearray(howManyBytes)
for count in range(howManyBytes-1, -1, -1):
b[count] = int(n % 256)
n >>= 8
return b
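# Round-trip example: bytesToNumber(bytearray([0x01, 0x02])) == 258, and
# numberToByteArray(258) == bytearray(b'\x01\x02') (big-endian both ways).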
def mpiToNumber(mpi): #mpi is an openssl-format bignum string
if (ord(mpi[4]) & 0x80) !=0: #Make sure this is a positive number
raise AssertionError()
b = bytearray(mpi[4:])
return bytesToNumber(b)
def numberToMPI(n):
b = numberToByteArray(n)
ext = 0
#If the high-order bit is going to be set,
#add an extra byte of zeros
if (numBits(n) & 0x7)==0:
ext = 1
length = numBytes(n) + ext
b = bytearray(4+ext) + b
b[0] = (length >> 24) & 0xFF
b[1] = (length >> 16) & 0xFF
b[2] = (length >> 8) & 0xFF
b[3] = length & 0xFF
return bytes(b)
# **************************************************************************
# Misc. Utility Functions
# **************************************************************************
def numBits(n):
if n==0:
return 0
s = "%x" % n
return ((len(s)-1)*4) + \
{'0':0, '1':1, '2':2, '3':2,
'4':3, '5':3, '6':3, '7':3,
'8':4, '9':4, 'a':4, 'b':4,
'c':4, 'd':4, 'e':4, 'f':4,
}[s[0]]
def numBytes(n):
if n==0:
return 0
bits = numBits(n)
return int(math.ceil(bits / 8.0))
# **************************************************************************
# Big Number Math
# **************************************************************************
def getRandomNumber(low, high):
if low >= high:
raise AssertionError()
howManyBits = numBits(high)
howManyBytes = numBytes(high)
lastBits = howManyBits % 8
while 1:
bytes = getRandomBytes(howManyBytes)
if lastBits:
bytes[0] = bytes[0] % (1 << lastBits)
n = bytesToNumber(bytes)
if n >= low and n < high:
return n
def gcd(a,b):
a, b = max(a,b), min(a,b)
while b:
a, b = b, a % b
return a
def lcm(a, b):
return (a * b) // gcd(a, b)
#Returns inverse of a mod b, zero if none
#Uses Extended Euclidean Algorithm
def invMod(a, b):
c, d = a, b
uc, ud = 1, 0
while c != 0:
q = d // c
c, d = d-(q*c), c
uc, ud = ud - (q * uc), uc
if d == 1:
return ud % b
return 0
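# For example, invMod(3, 11) == 4 because (3 * 4) % 11 == 1, while
# invMod(2, 4) == 0 since gcd(2, 4) != 1 and no inverse exists.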
if gmpyLoaded:
def powMod(base, power, modulus):
base = gmpy.mpz(base)
power = gmpy.mpz(power)
modulus = gmpy.mpz(modulus)
result = pow(base, power, modulus)
return long(result)
else:
def powMod(base, power, modulus):
if power < 0:
result = pow(base, power*-1, modulus)
result = invMod(result, modulus)
return result
else:
return pow(base, power, modulus)
#Pre-calculate a sieve of the ~100 primes < 1000:
def makeSieve(n):
sieve = list(range(n))
for count in range(2, int(math.sqrt(n))+1):
if sieve[count] == 0:
continue
x = sieve[count] * 2
while x < len(sieve):
sieve[x] = 0
x += sieve[count]
sieve = [x for x in sieve[2:] if x]
return sieve
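# For example, makeSieve(20) returns [2, 3, 5, 7, 11, 13, 17, 19].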
sieve = makeSieve(1000)
def isPrime(n, iterations=5, display=False):
#Trial division with sieve
for x in sieve:
if x >= n: return True
if n % x == 0: return False
#Passed trial division, proceed to Rabin-Miller
#Rabin-Miller implemented per Ferguson & Schneier
#Compute s, t for Rabin-Miller
if display: print("*", end=' ')
s, t = n-1, 0
while s % 2 == 0:
s, t = s//2, t+1
#Repeat Rabin-Miller x times
a = 2 #Use 2 as a base for first iteration speedup, per HAC
for count in range(iterations):
v = powMod(a, s, n)
if v==1:
continue
i = 0
while v != n-1:
if i == t-1:
return False
else:
v, i = powMod(v, 2, n), i+1
a = getRandomNumber(2, n)
return True
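# For example, isPrime(97) is True, while isPrime(91) is False (91 == 7 * 13,
# caught by the trial-division sieve before Rabin-Miller even runs).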
def getRandomPrime(bits, display=False):
if bits < 10:
raise AssertionError()
#The 1.5 ensures the 2 MSBs are set
#Thus, when used for p,q in RSA, n will have its MSB set
#
#Since 30 is lcm(2,3,5), we'll set our test numbers to
#29 % 30 and keep them there
low = ((2 ** (bits-1)) * 3) // 2
high = 2 ** bits - 30
p = getRandomNumber(low, high)
p += 29 - (p % 30)
while 1:
if display: print(".", end=' ')
p += 30
if p >= high:
p = getRandomNumber(low, high)
p += 29 - (p % 30)
if isPrime(p, display=display):
return p
#Unused at the moment...
def getRandomSafePrime(bits, display=False):
if bits < 10:
raise AssertionError()
#The 1.5 ensures the 2 MSBs are set
#Thus, when used for p,q in RSA, n will have its MSB set
#
#Since 30 is lcm(2,3,5), we'll set our test numbers to
#29 % 30 and keep them there
low = (2 ** (bits-2)) * 3//2
high = (2 ** (bits-1)) - 30
q = getRandomNumber(low, high)
q += 29 - (q % 30)
while 1:
if display: print(".", end=' ')
q += 30
if (q >= high):
q = getRandomNumber(low, high)
q += 29 - (q % 30)
#Ideas from Tom Wu's SRP code
#Do trial division on p and q before Rabin-Miller
if isPrime(q, 0, display=display):
p = (2 * q) + 1
if isPrime(p, display=display):
if isPrime(q, display=display):
return p
| scheib/chromium | third_party/tlslite/tlslite/utils/cryptomath.py | Python | bsd-3-clause | 8,434 |
# GUI reader side: like pipes-gui1, but make root window and mainloop explicit
from tkinter import *
from PP4E.Gui.Tools.guiStreams import redirectedGuiShellCmd
def launch():
redirectedGuiShellCmd('python -u pipe-nongui.py')
window = Tk()
Button(window, text='GO!', command=launch).pack()
window.mainloop()
| simontakite/sysadmin | pythonscripts/programmingpython/Gui/Tools/pipe-gui2.py | Python | gpl-2.0 | 325 |
# -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding model 'ReferenceFrame'
db.create_table(u'ddsc_core_referenceframe', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('code', self.gf('django.db.models.fields.CharField')(unique=True, max_length=12)),
('description', self.gf('django.db.models.fields.CharField')(unique=True, max_length=60)),
('begin_date', self.gf('django.db.models.fields.DateField')()),
('end_date', self.gf('django.db.models.fields.DateField')()),
('group', self.gf('django.db.models.fields.CharField')(max_length=60, null=True)),
))
db.send_create_signal(u'ddsc_core', ['ReferenceFrame'])
def backwards(self, orm):
# Deleting model 'ReferenceFrame'
db.delete_table(u'ddsc_core_referenceframe')
models = {
u'ddsc_core.compartment': {
'Meta': {'ordering': "[u'description']", 'object_name': 'Compartment'},
'begin_date': ('django.db.models.fields.DateField', [], {}),
'code': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '12'}),
'description': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '60'}),
'end_date': ('django.db.models.fields.DateField', [], {}),
'group': ('django.db.models.fields.CharField', [], {'max_length': '60', 'null': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'numeric_code': ('django.db.models.fields.CharField', [], {'max_length': '12', 'null': 'True'})
},
u'ddsc_core.measuringdevice': {
'Meta': {'ordering': "[u'description']", 'object_name': 'MeasuringDevice'},
'begin_date': ('django.db.models.fields.DateField', [], {}),
'code': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '12'}),
'description': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '60'}),
'end_date': ('django.db.models.fields.DateField', [], {}),
'group': ('django.db.models.fields.CharField', [], {'max_length': '60', 'null': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
},
u'ddsc_core.measuringmethod': {
'Meta': {'ordering': "[u'description']", 'object_name': 'MeasuringMethod'},
'begin_date': ('django.db.models.fields.DateField', [], {}),
'code': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '12'}),
'description': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '60'}),
'end_date': ('django.db.models.fields.DateField', [], {}),
'group': ('django.db.models.fields.CharField', [], {'max_length': '60', 'null': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'titel': ('django.db.models.fields.CharField', [], {'max_length': '600', 'null': 'True'})
},
u'ddsc_core.parameter': {
'Meta': {'ordering': "[u'description']", 'object_name': 'Parameter'},
'begin_date': ('django.db.models.fields.DateField', [], {}),
'cas_number': ('django.db.models.fields.CharField', [], {'max_length': '12'}),
'code': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '12'}),
'description': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '60'}),
'end_date': ('django.db.models.fields.DateField', [], {}),
'group': ('django.db.models.fields.CharField', [], {'max_length': '60'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'sikb_id': ('django.db.models.fields.IntegerField', [], {'unique': 'True', 'null': 'True'})
},
u'ddsc_core.processingmethod': {
'Meta': {'ordering': "[u'description']", 'object_name': 'ProcessingMethod'},
'begin_date': ('django.db.models.fields.DateField', [], {}),
'code': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '12'}),
'description': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '60'}),
'end_date': ('django.db.models.fields.DateField', [], {}),
'group': ('django.db.models.fields.CharField', [], {'max_length': '60', 'null': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
},
u'ddsc_core.referenceframe': {
'Meta': {'ordering': "[u'description']", 'object_name': 'ReferenceFrame'},
'begin_date': ('django.db.models.fields.DateField', [], {}),
'code': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '12'}),
'description': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '60'}),
'end_date': ('django.db.models.fields.DateField', [], {}),
'group': ('django.db.models.fields.CharField', [], {'max_length': '60', 'null': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
},
u'ddsc_core.unit': {
'Meta': {'ordering': "[u'description']", 'object_name': 'Unit'},
'begin_date': ('django.db.models.fields.DateField', [], {}),
'code': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '12'}),
'conversion_factor': ('django.db.models.fields.CharField', [], {'max_length': '12', 'null': 'True'}),
'description': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '60'}),
'dimension': ('django.db.models.fields.CharField', [], {'max_length': '12', 'null': 'True'}),
'end_date': ('django.db.models.fields.DateField', [], {}),
'group': ('django.db.models.fields.CharField', [], {'max_length': '60', 'null': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
}
}
complete_apps = ['ddsc_core'] | ddsc/ddsc-core | ddsc_core/migrations/0009_add_model_ReferenceFrame.py | Python | mit | 6,428 |
# -*- coding: utf-8 -*-
# Copyright 2015 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from nailgun.api.v1.validators.base import BasicValidator
from nailgun.api.v1.validators.json_schema import plugin_link
class PluginLinkValidator(BasicValidator):
collection_schema = plugin_link.PLUGIN_LINKS_SCHEMA
@classmethod
def validate(cls, data):
parsed = super(PluginLinkValidator, cls).validate(data)
cls.validate_schema(
parsed,
plugin_link.PLUGIN_LINK_SCHEMA
)
return parsed
@classmethod
def validate_update(cls, data, instance):
parsed = super(PluginLinkValidator, cls).validate(data)
cls.validate_schema(
parsed,
plugin_link.PLUGIN_LINK_UPDATE_SCHEMA
)
return parsed
@classmethod
def validate_create(cls, data):
return cls.validate(data)
| huntxu/fuel-web | nailgun/nailgun/api/v1/validators/plugin_link.py | Python | apache-2.0 | 1,438 |
# -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import proto # type: ignore
__protobuf__ = proto.module(
package="google.ads.googleads.v9.errors",
marshal="google.ads.googleads.v9",
manifest={"KeywordPlanCampaignKeywordErrorEnum",},
)
class KeywordPlanCampaignKeywordErrorEnum(proto.Message):
r"""Container for enum describing possible errors from applying a
keyword plan campaign keyword.
"""
class KeywordPlanCampaignKeywordError(proto.Enum):
r"""Enum describing possible errors from applying a keyword plan
campaign keyword.
"""
UNSPECIFIED = 0
UNKNOWN = 1
CAMPAIGN_KEYWORD_IS_POSITIVE = 8
__all__ = tuple(sorted(__protobuf__.manifest))
| googleads/google-ads-python | google/ads/googleads/v9/errors/types/keyword_plan_campaign_keyword_error.py | Python | apache-2.0 | 1,277 |
import unittest
from nose.plugins.skip import SkipTest
import numpy
import theano
import theano.tensor as T
from theano.tests import unittest_tools as utt
from theano.tensor.signal import conv
from theano.tensor.basic import _allclose
class TestSignalConv2D(unittest.TestCase):
def setUp(self):
utt.seed_rng()
def validate(self, image_shape, filter_shape, out_dim, verify_grad=True):
image_dim = len(image_shape)
filter_dim = len(filter_shape)
input = T.TensorType('float64', [False] * image_dim)()
filters = T.TensorType('float64', [False] * filter_dim)()
bsize = image_shape[0]
if image_dim != 3:
bsize = 1
nkern = filter_shape[0]
if filter_dim != 3:
nkern = 1
# THEANO IMPLEMENTATION ############
# we create a symbolic function so that verify_grad can work
def sym_conv2d(input, filters):
return conv.conv2d(input, filters)
output = sym_conv2d(input, filters)
assert output.ndim == out_dim
theano_conv = theano.function([input, filters], output)
# initialize input and compute result
image_data = numpy.random.random(image_shape)
filter_data = numpy.random.random(filter_shape)
theano_output = theano_conv(image_data, filter_data)
# REFERENCE IMPLEMENTATION ############
out_shape2d = numpy.array(image_shape[-2:]) - numpy.array(filter_shape[-2:]) + 1
ref_output = numpy.zeros(tuple(out_shape2d))
# reshape as 3D input tensors to make life easier
image_data3d = image_data.reshape((bsize,) + image_shape[-2:])
filter_data3d = filter_data.reshape((nkern,) + filter_shape[-2:])
# reshape theano output as 4D to make life easier
theano_output4d = theano_output.reshape((bsize, nkern,) +
theano_output.shape[-2:])
# loop over mini-batches (if required)
for b in range(bsize):
# loop over filters (if required)
for k in range(nkern):
image2d = image_data3d[b, :, :]
filter2d = filter_data3d[k, :, :]
output2d = numpy.zeros(ref_output.shape)
for row in range(ref_output.shape[0]):
for col in range(ref_output.shape[1]):
output2d[row, col] += (
image2d[row:row + filter2d.shape[0],
col:col + filter2d.shape[1]] *
filter2d[::-1, ::-1]
).sum()
self.assertTrue(_allclose(theano_output4d[b, k, :, :],
output2d))
# TEST GRADIENT ############
if verify_grad:
utt.verify_grad(sym_conv2d, [image_data, filter_data])
def test_basic(self):
"""
Basic functionality of nnet.conv.ConvOp is already tested by
its own test suite. We just have to test whether or not
signal.conv.conv2d can support inputs and filters of type
matrix or tensor3.
"""
if(not theano.tensor.nnet.conv.imported_scipy_signal and
theano.config.cxx == ""):
raise SkipTest("conv2d tests need SciPy or a c++ compiler")
self.validate((1, 4, 5), (2, 2, 3), out_dim=4, verify_grad=True)
self.validate((7, 5), (5, 2, 3), out_dim=3, verify_grad=False)
self.validate((3, 7, 5), (2, 3), out_dim=3, verify_grad=False)
self.validate((7, 5), (2, 3), out_dim=2, verify_grad=False)
def test_fail(self):
"""
Test that conv2d fails for dimensions other than 2 or 3.
"""
self.assertRaises(Exception, conv.conv2d, T.dtensor4(), T.dtensor3())
self.assertRaises(Exception, conv.conv2d, T.dtensor3(), T.dvector())
def test_bug_josh_reported(self):
"""
Test refers to a bug reported by Josh, when due to a bad merge these
few lines of code failed. See
http://groups.google.com/group/theano-dev/browse_thread/thread/8856e7ca5035eecb
"""
m1 = theano.tensor.matrix()
m2 = theano.tensor.matrix()
conv.conv2d(m1, m2)
| valexandersaulys/airbnb_kaggle_contest | venv/lib/python3.4/site-packages/Theano-0.7.0-py3.4.egg/theano/tensor/signal/tests/test_conv.py | Python | gpl-2.0 | 4,258 |
# -*- coding: utf-8 -*-
# ***************************************************************************
# * Copyright (c) 2017 sliptonic <shopinthewoods@gmail.com> *
# * *
# * This program is free software; you can redistribute it and/or modify *
# * it under the terms of the GNU Lesser General Public License (LGPL) *
# * as published by the Free Software Foundation; either version 2 of *
# * the License, or (at your option) any later version. *
# * for detail see the LICENCE text file. *
# * *
# * This program is distributed in the hope that it will be useful, *
# * but WITHOUT ANY WARRANTY; without even the implied warranty of *
# * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
# * GNU Library General Public License for more details. *
# * *
# * You should have received a copy of the GNU Library General Public *
# * License along with this program; if not, write to the Free Software *
# * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 *
# * USA *
# * *
# ***************************************************************************
import FreeCAD
import FreeCADGui
import PathGui as PGui # ensure Path/Gui/Resources are loaded
import PathScripts.PathGui as PathGui
import PathScripts.PathOpGui as PathOpGui
from PySide import QtCore #, QtGui
__title__ = "Path Pocket Base Operation UI"
__author__ = "sliptonic (Brad Collette)"
__url__ = "https://www.freecadweb.org"
__doc__ = "Base page controller and command implementation for path pocket operations."
def translate(context, text, disambig=None):
return QtCore.QCoreApplication.translate(context, text, disambig)
FeaturePocket = 0x01
FeatureFacing = 0x02
FeatureOutline = 0x04
class TaskPanelOpPage(PathOpGui.TaskPanelPage):
'''Page controller class for pocket operations, supports:
FeaturePocket ... used for pocketing operation
FeatureFacing ... used for face milling operation
FeatureOutline ... used for pocket-shape operation
'''
def pocketFeatures(self):
'''pocketFeatures() ... return which features of the UI are supported by the operation.
FeaturePocket ... used for pocketing operation
FeatureFacing ... used for face milling operation
FeatureOutline ... used for pocket-shape operation
Must be overwritten by subclasses'''
pass # pylint: disable=unnecessary-pass
def getForm(self):
'''getForm() ... returns UI, adapted to the results from pocketFeatures()'''
form = FreeCADGui.PySideUic.loadUi(":/panels/PageOpPocketFullEdit.ui")
if not FeatureFacing & self.pocketFeatures():
form.facingWidget.hide()
form.clearEdges.hide()
if FeaturePocket & self.pocketFeatures():
form.extraOffset_label.setText(translate("PathPocket", "Pass Extension"))
form.extraOffset.setToolTip(translate("PathPocket", "The distance the facing operation will extend beyond the boundary shape."))
if not (FeatureOutline & self.pocketFeatures()):
form.useOutline.hide()
# if True:
# # currently doesn't have an effect or is experimental
# form.minTravel.hide()
return form
def updateMinTravel(self, obj, setModel=True):
if obj.UseStartPoint:
self.form.minTravel.setEnabled(True)
else:
self.form.minTravel.setChecked(False)
self.form.minTravel.setEnabled(False)
if setModel and obj.MinTravel != self.form.minTravel.isChecked():
obj.MinTravel = self.form.minTravel.isChecked()
def updateZigZagAngle(self, obj, setModel=True):
if obj.OffsetPattern in ['Offset', 'Spiral']:
self.form.zigZagAngle.setEnabled(False)
else:
self.form.zigZagAngle.setEnabled(True)
if setModel:
PathGui.updateInputField(obj, 'ZigZagAngle', self.form.zigZagAngle)
def getFields(self, obj):
'''getFields(obj) ... transfers values from UI to obj's proprties'''
if obj.CutMode != str(self.form.cutMode.currentText()):
obj.CutMode = str(self.form.cutMode.currentText())
if obj.StepOver != self.form.stepOverPercent.value():
obj.StepOver = self.form.stepOverPercent.value()
if obj.OffsetPattern != str(self.form.offsetPattern.currentText()):
obj.OffsetPattern = str(self.form.offsetPattern.currentText())
PathGui.updateInputField(obj, 'ExtraOffset', self.form.extraOffset)
self.updateToolController(obj, self.form.toolController)
self.updateCoolant(obj, self.form.coolantController)
self.updateZigZagAngle(obj)
if obj.UseStartPoint != self.form.useStartPoint.isChecked():
obj.UseStartPoint = self.form.useStartPoint.isChecked()
if FeatureOutline & self.pocketFeatures():
if obj.UseOutline != self.form.useOutline.isChecked():
obj.UseOutline = self.form.useOutline.isChecked()
self.updateMinTravel(obj)
if FeatureFacing & self.pocketFeatures():
if obj.BoundaryShape != str(self.form.boundaryShape.currentText()):
obj.BoundaryShape = str(self.form.boundaryShape.currentText())
if obj.ClearEdges != self.form.clearEdges.isChecked():
obj.ClearEdges = self.form.clearEdges.isChecked()
def setFields(self, obj):
'''setFields(obj) ... transfers obj's property values to UI'''
self.form.stepOverPercent.setValue(obj.StepOver)
self.form.extraOffset.setText(FreeCAD.Units.Quantity(obj.ExtraOffset.Value, FreeCAD.Units.Length).UserString)
self.form.useStartPoint.setChecked(obj.UseStartPoint)
if FeatureOutline & self.pocketFeatures():
self.form.useOutline.setChecked(obj.UseOutline)
self.form.zigZagAngle.setText(FreeCAD.Units.Quantity(obj.ZigZagAngle, FreeCAD.Units.Angle).UserString)
self.updateZigZagAngle(obj, False)
self.form.minTravel.setChecked(obj.MinTravel)
self.updateMinTravel(obj, False)
self.selectInComboBox(obj.OffsetPattern, self.form.offsetPattern)
self.selectInComboBox(obj.CutMode, self.form.cutMode)
self.setupToolController(obj, self.form.toolController)
self.setupCoolant(obj, self.form.coolantController)
if FeatureFacing & self.pocketFeatures():
self.selectInComboBox(obj.BoundaryShape, self.form.boundaryShape)
self.form.clearEdges.setChecked(obj.ClearEdges)
def getSignalsForUpdate(self, obj):
'''getSignalsForUpdate(obj) ... return list of signals for updating obj'''
signals = []
signals.append(self.form.cutMode.currentIndexChanged)
signals.append(self.form.offsetPattern.currentIndexChanged)
signals.append(self.form.stepOverPercent.editingFinished)
signals.append(self.form.zigZagAngle.editingFinished)
signals.append(self.form.toolController.currentIndexChanged)
signals.append(self.form.extraOffset.editingFinished)
signals.append(self.form.useStartPoint.clicked)
signals.append(self.form.useOutline.clicked)
signals.append(self.form.minTravel.clicked)
signals.append(self.form.coolantController.currentIndexChanged)
if FeatureFacing & self.pocketFeatures():
signals.append(self.form.boundaryShape.currentIndexChanged)
signals.append(self.form.clearEdges.clicked)
return signals
| sanguinariojoe/FreeCAD | src/Mod/Path/PathScripts/PathPocketBaseGui.py | Python | lgpl-2.1 | 8,004 |
#! /usr/bin/python
"""
dispatch.py
By Paul Malmsten, 2010
pmalmsten@gmail.com
This example continuously reads the serial port and dispatches arriving
packets to the appropriate methods for processing.
"""
from xbee.helpers.dispatch import Dispatch
import serial
PORT = '/dev/ttyUSB0'
BAUD_RATE = 9600
# Open serial port
ser = serial.Serial(PORT, BAUD_RATE)
# Create handlers for various packet types
def status_handler(name, packet):
print "Status Update - Status is now: ", packet['status']
def io_sample_handler(name, packet):
print "Samples Received: ", packet['samples']
# When a Dispatch is created with a serial port, it will automatically
# create an XBee object on your behalf for accessing the device.
# If you wish, you may explicitly provide your own XBee:
#
# xbee = XBee(ser)
# dispatch = Dispatch(xbee=xbee)
#
# Functionally, these are the same.
dispatch = Dispatch(ser)
# Register the packet handlers with the dispatch:
# The string name allows one to distinguish between multiple registrations
# for a single callback function
# The second argument is the function to call
# The third argument is a function which determines whether to call its
# associated callback when a packet arrives. It should return a boolean.
dispatch.register(
"status",
status_handler,
lambda packet: packet['id']=='status'
)
dispatch.register(
"io_data",
io_sample_handler,
lambda packet: packet['id']=='rx_io_data'
)
try:
# run() will loop infinitely while waiting for and processing
# packets which arrive. Don't expect it to return (unless an
# exception occurs).
dispatch.run()
except KeyboardInterrupt:
pass
ser.close()
| MengGuo/Jackal_Velodyne_Duke | xbee_communication/XBee-2.2.3/examples/dispatch.py | Python | gpl-2.0 | 1,695 |
""" SQLAlchemy models """
# pylint: disable=no-init
# pylint: disable=too-few-public-methods
# pylint: disable=missing-docstring
from sqlalchemy import Column, Integer, Date, DateTime, Numeric, String, ForeignKey
from sqlalchemy.ext.declarative import declarative_base
Base = declarative_base()
class Houses(Base):
__tablename__ = 'houses'
house_id = Column(Integer, primary_key=True, autoincrement=False)
name = Column(String(32))
sname = Column(String(32))
iga = Column(Numeric(precision=8, scale=2))
ciga = Column(Numeric(precision=8, scale=2))
ega = Column(Numeric(precision=8, scale=2))
class MonitorDevices(Base):
__tablename__ = 'monitor_devices'
device_id = Column(Integer, primary_key=True, autoincrement=False)
house_id = Column(Integer, ForeignKey('houses.house_id'), primary_key=True)
name = Column(String(32))
class Circuits(Base):
__tablename__ = 'circuits'
house_id = Column(Integer, ForeignKey('houses.house_id'), primary_key=True)
circuit_id = Column(String(32), primary_key=True, autoincrement=False)
name = Column(String(32))
description = Column(String(80))
class EnergyHourly(Base):
__tablename__ = 'energy_hourly'
house_id = Column(Integer, ForeignKey('houses.house_id'), primary_key=True)
device_id = Column(Integer, ForeignKey('monitor_devices.device_id'), primary_key=True)
date = Column(DateTime, primary_key=True)
adjusted_load = Column(Numeric(precision=14, scale=6))
solar = Column(Numeric(precision=14, scale=6))
used = Column(Numeric(precision=14, scale=6))
water_heater = Column(Numeric(precision=14, scale=6))
ashp = Column(Numeric(precision=14, scale=6))
water_pump = Column(Numeric(precision=14, scale=6))
dryer = Column(Numeric(precision=14, scale=6))
washer = Column(Numeric(precision=14, scale=6))
dishwasher = Column(Numeric(precision=14, scale=6))
stove = Column(Numeric(precision=14, scale=6))
refrigerator = Column(Numeric(precision=14, scale=6))
living_room = Column(Numeric(precision=14, scale=6))
aux_heat_bedrooms = Column(Numeric(precision=14, scale=6))
aux_heat_living = Column(Numeric(precision=14, scale=6))
study = Column(Numeric(precision=14, scale=6))
barn = Column(Numeric(precision=14, scale=6))
basement_west = Column(Numeric(precision=14, scale=6))
basement_east = Column(Numeric(precision=14, scale=6))
ventilation = Column(Numeric(precision=14, scale=6))
ventilation_preheat = Column(Numeric(precision=14, scale=6))
kitchen_recept_rt = Column(Numeric(precision=14, scale=6))
class EnergyDaily(Base):
__tablename__ = 'energy_daily'
house_id = Column(Integer, ForeignKey('houses.house_id'), primary_key=True)
device_id = Column(Integer, ForeignKey('monitor_devices.device_id'), primary_key=True)
date = Column(Date, primary_key=True)
adjusted_load = Column(Numeric(precision=14, scale=9))
solar = Column(Numeric(precision=14, scale=9))
used = Column(Numeric(precision=14, scale=9))
water_heater = Column(Numeric(precision=14, scale=9))
ashp = Column(Numeric(precision=14, scale=9))
water_pump = Column(Numeric(precision=14, scale=9))
dryer = Column(Numeric(precision=14, scale=9))
washer = Column(Numeric(precision=14, scale=9))
dishwasher = Column(Numeric(precision=14, scale=9))
stove = Column(Numeric(precision=14, scale=9))
refrigerator = Column(Numeric(precision=14, scale=9))
living_room = Column(Numeric(precision=14, scale=9))
aux_heat_bedrooms = Column(Numeric(precision=14, scale=9))
aux_heat_living = Column(Numeric(precision=14, scale=9))
study = Column(Numeric(precision=14, scale=9))
barn = Column(Numeric(precision=14, scale=9))
basement_west = Column(Numeric(precision=14, scale=9))
basement_east = Column(Numeric(precision=14, scale=9))
ventilation = Column(Numeric(precision=14, scale=9))
ventilation_preheat = Column(Numeric(precision=14, scale=9))
kitchen_recept_rt = Column(Numeric(precision=14, scale=9))
class EnergyMonthly(Base):
__tablename__ = 'energy_monthly'
house_id = Column(Integer, ForeignKey('houses.house_id'), primary_key=True)
device_id = Column(Integer, ForeignKey('monitor_devices.device_id'), primary_key=True)
date = Column(Date, primary_key=True)
adjusted_load = Column(Numeric(precision=14, scale=9))
solar = Column(Numeric(precision=14, scale=9))
used = Column(Numeric(precision=14, scale=9))
water_heater = Column(Numeric(precision=14, scale=9))
ashp = Column(Numeric(precision=14, scale=9))
water_pump = Column(Numeric(precision=14, scale=9))
dryer = Column(Numeric(precision=14, scale=9))
washer = Column(Numeric(precision=14, scale=9))
dishwasher = Column(Numeric(precision=14, scale=9))
stove = Column(Numeric(precision=14, scale=9))
refrigerator = Column(Numeric(precision=14, scale=9))
living_room = Column(Numeric(precision=14, scale=9))
aux_heat_bedrooms = Column(Numeric(precision=14, scale=9))
aux_heat_living = Column(Numeric(precision=14, scale=9))
study = Column(Numeric(precision=14, scale=9))
barn = Column(Numeric(precision=14, scale=9))
basement_west = Column(Numeric(precision=14, scale=9))
basement_east = Column(Numeric(precision=14, scale=9))
ventilation = Column(Numeric(precision=14, scale=9))
ventilation_preheat = Column(Numeric(precision=14, scale=9))
kitchen_recept_rt = Column(Numeric(precision=14, scale=9))
class HDDMonthly(Base):
__tablename__ = 'hdd_monthly'
house_id = Column(Integer, ForeignKey('houses.house_id'), primary_key=True)
date = Column(Date, primary_key=True)
hdd = Column(Numeric(precision=7, scale=3))
class HDDDaily(Base):
__tablename__ = 'hdd_daily'
house_id = Column(Integer, ForeignKey('houses.house_id'), primary_key=True)
date = Column(Date, primary_key=True)
hdd = Column(Numeric(precision=6, scale=3))
class HDDHourly(Base):
__tablename__ = 'hdd_hourly'
house_id = Column(Integer, ForeignKey('houses.house_id'), primary_key=True)
date = Column(DateTime, primary_key=True)
hdd = Column(Numeric(precision=6, scale=3))
class EstimatedMonthly(Base):
__tablename__ = 'estimated_monthly'
house_id = Column(Integer, ForeignKey('houses.house_id'), primary_key=True)
date = Column(Date, primary_key=True)
solar = Column(Numeric(precision=4))
used = Column(Numeric(precision=4))
hdd = Column(Numeric(precision=4))
water = Column(Numeric(precision=4))
class TemperatureDaily(Base):
__tablename__ = 'temperature_daily'
house_id = Column(Integer, ForeignKey('houses.house_id'), primary_key=True)
device_id = Column(Integer, ForeignKey('monitor_devices.device_id'), primary_key=True)
date = Column(Date, primary_key=True)
temperature_min = Column(Numeric(precision=6, scale=3))
temperature_max = Column(Numeric(precision=6, scale=3))
humidity_min = Column(Numeric(precision=6, scale=3))
humidity_max = Column(Numeric(precision=6, scale=3))
class TemperatureHourly(Base):
__tablename__ = 'temperature_hourly'
house_id = Column(Integer, ForeignKey('houses.house_id'), primary_key=True)
device_id = Column(Integer, ForeignKey('monitor_devices.device_id'), primary_key=True)
date = Column(DateTime, primary_key=True)
temperature = Column(Numeric(precision=6, scale=3))
humidity = Column(Numeric(precision=6, scale=3))
class WaterMonthly(Base):
__tablename__ = 'water_monthly'
house_id = Column(Integer, ForeignKey('houses.house_id'), primary_key=True)
device_id = Column(Integer, ForeignKey('monitor_devices.device_id'), primary_key=True)
date = Column(Date, primary_key=True)
gallons = Column(Numeric(precision=7, scale=1))
class LimitsHourly(Base):
__tablename__ = 'limits_hourly'
house_id = Column(Integer, ForeignKey('houses.house_id'), primary_key=True)
used_max = Column(Numeric(precision=14, scale=9))
solar_min = Column(Numeric(precision=14, scale=9))
outdoor_deg_min = Column(Numeric(precision=6, scale=3))
outdoor_deg_max = Column(Numeric(precision=6, scale=3))
hdd_max = Column(Numeric(precision=4, scale=3))
start_date = Column(DateTime)
end_date = Column(DateTime)
| netplusdesign/home-performance-flask-api | chartingperformance/models.py | Python | mit | 8,283 |
# ClockAlarm is a cross-platform alarm manager
# Copyright (C) 2017 Loïc Charrière, Samuel Gauthier
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from os.path import join, abspath, dirname
from PyQt5.QtCore import Qt, pyqtSignal, QRect
from PyQt5.QtGui import QPixmap
from PyQt5.QtWidgets import QWidget, QLabel
from _clockalarm.utils.importExportUtils import get_default_config
class NotificationWidget(QWidget):
"""Notification widget
Attributes:
geometry: The position and size of the widget on the screen
notification: The notification
"""
popup_close = pyqtSignal('PyQt_PyObject')
def __init__(self, geometry, notification, parent=None):
"""Inits the NotificationWidget with a position and a notification.
Note:
By default, the NotificationWidget has no parent
"""
super(NotificationWidget, self).__init__(parent=parent,
flags=Qt.Tool | Qt.WindowStaysOnTopHint | Qt.FramelessWindowHint)
self.parent = parent
self.notification = notification
self.init_ui(geometry)
def init_ui(self, geom):
"""Helper method that sets the style of the NotificationWidget.
        Args:
geom: The position and size of the widget on the screen
"""
self.setGeometry(geom)
self.setAttribute(Qt.WA_TranslucentBackground)
self.setAttribute(Qt.WA_ShowWithoutActivating)
"""Background Image"""
im_name = get_default_config("WIDGET_FILE_NAME")
im_path = join(dirname(dirname(abspath(__file__))), 'resources', 'images',
im_name)
lbl_im = QLabel(self)
lbl_im.setPixmap(QPixmap(im_path))
"""Notification message"""
color = self.notification.get_color()
alpha = get_default_config("WIDGET_TRANSPARENCY", "int")
rgba = "{r}, {g}, {b}, {a}".format(r=color.red(), g=color.green(), b=color.blue(), a=alpha)
lbl = QLabel(self.notification.message, self)
lbl.setAlignment(Qt.AlignVCenter)
lbl.setWordWrap(True)
padding_top = get_default_config("WIDGET_TEXT_PADDING_TOP", "int")
padding_left = get_default_config("WIDGET_TEXT_PADDING_LEFT", "int")
text_width = get_default_config("WIDGET_TEXT_WIDTH", "int")
text_height = get_default_config("WIDGET_TEXT_HEIGHT", "int")
lbl.setGeometry(QRect(padding_left, padding_top, text_width, text_height))
lbl.setFont(self.notification.get_font())
lbl.setStyleSheet(
"QLabel { color : rgba(" + rgba + ")}")
def mousePressEvent(self, event):
"""Override of :class:~PyQt5.QtWidgets.QWidget.mousePressEvent method"""
if self.underMouse():
self.close()
self.popup_close.emit(self)
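# A hypothetical usage sketch (not part of the original file): it assumes a
# Notification object exposing .message, .get_color() and .get_font(), as
# used by init_ui() above; handle_close is a made-up slot.
#
# widget = NotificationWidget(QRect(50, 50, 300, 100), notification)
# widget.popup_close.connect(handle_close)
# widget.show()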
| BFH-BTI7301-project1/ClockAlarm | _clockalarm/UI/NotificationWidget.py | Python | gpl-3.0 | 3,442 |
from django.test import TestCase
from wagtail.wagtailcore.models import Collection
class TestCollectionTreeOperations(TestCase):
def setUp(self):
self.root_collection = Collection.get_first_root_node()
self.holiday_photos_collection = self.root_collection.add_child(
name="Holiday photos"
)
self.evil_plans_collection = self.root_collection.add_child(
name="Evil plans"
)
def test_get_ancestors(self):
self.assertEqual(
list(self.holiday_photos_collection.get_ancestors().order_by('path')),
[self.root_collection]
)
self.assertEqual(
list(self.holiday_photos_collection.get_ancestors(inclusive=True).order_by('path')),
[self.root_collection, self.holiday_photos_collection]
)
def test_get_descendants(self):
self.assertEqual(
list(self.root_collection.get_descendants().order_by('path')),
[self.holiday_photos_collection, self.evil_plans_collection]
)
self.assertEqual(
list(self.root_collection.get_descendants(inclusive=True).order_by('path')),
[
self.root_collection,
self.holiday_photos_collection,
self.evil_plans_collection
]
)
def test_get_siblings(self):
self.assertEqual(
list(self.holiday_photos_collection.get_siblings().order_by('path')),
[self.holiday_photos_collection, self.evil_plans_collection]
)
self.assertEqual(
list(self.holiday_photos_collection.get_siblings(inclusive=False).order_by('path')),
[self.evil_plans_collection]
)
def test_get_next_siblings(self):
self.assertEqual(
list(
self.holiday_photos_collection.get_next_siblings().order_by('path')
),
[self.evil_plans_collection]
)
self.assertEqual(
list(
self.holiday_photos_collection.get_next_siblings(inclusive=True).order_by('path')
),
[self.holiday_photos_collection, self.evil_plans_collection]
)
self.assertEqual(
list(
self.evil_plans_collection.get_next_siblings().order_by('path')
),
[]
)
def test_get_prev_siblings(self):
self.assertEqual(
list(
self.holiday_photos_collection.get_prev_siblings().order_by('path')
),
[]
)
self.assertEqual(
list(
self.evil_plans_collection.get_prev_siblings().order_by('path')
),
[self.holiday_photos_collection]
)
self.assertEqual(
list(
self.evil_plans_collection.get_prev_siblings(inclusive=True).order_by('path')
),
[self.holiday_photos_collection, self.evil_plans_collection]
)
| davecranwell/wagtail | wagtail/wagtailcore/tests/test_collection_model.py | Python | bsd-3-clause | 3,008 |
# Copyright (C) 2015-2022 by the RBniCS authors
#
# This file is part of RBniCS.
#
# SPDX-License-Identifier: LGPL-3.0-or-later
from rbnics.reduction_methods.navier_stokes.navier_stokes_pod_galerkin_reduction import NavierStokesPODGalerkinReduction
# from rbnics.reduction_methods.navier_stokes.navier_stokes_rb_reduction import NavierStokesRBReduction
from rbnics.reduction_methods.navier_stokes.navier_stokes_reduction_method import NavierStokesReductionMethod
__all__ = [
"NavierStokesPODGalerkinReduction",
# "NavierStokesRBReduction",
"NavierStokesReductionMethod"
]
| mathLab/RBniCS | rbnics/reduction_methods/navier_stokes/__init__.py | Python | lgpl-3.0 | 586 |
from afl_utils import afl_vcrash
import os
import unittest
class AflVCrashTestCase(unittest.TestCase):
def setUp(self):
# Use to set up test environment prior to test case
# invocation
pass
def tearDown(self):
# Use for clean up after tests have run
if os.path.exists('/tmp/afl_multicore.PGID.unittest_sess_01'):
os.remove('/tmp/afl_multicore.PGID.unittest_sess_01')
if os.path.exists('testdata/invalid'):
os.remove('testdata/invalid')
if os.path.exists('testdata/test_coll/invalid'):
os.remove('testdata/test_coll/invalid')
if os.path.exists('testdata/test_coll'):
os.rmdir('testdata/test_coll')
if os.path.exists('testdata/vcrash_filelist'):
os.remove('testdata/vcrash_filelist')
def test_show_info(self):
self.assertIsNone(afl_vcrash.show_info())
def test_verify_samples(self):
# test for invalid crash detection
num_threads = 1
samples = ['testdata/sync/fuzz000/fuzzer_stats'] # invalid (non-crashing) sample
target_cmd = 'ls'
timeout_secs = 3
self.assertEqual((['testdata/sync/fuzz000/fuzzer_stats'], []),
afl_vcrash.verify_samples(num_threads, samples, target_cmd, timeout_secs))
# test for timeout detection
num_threads = 1
samples = ['testdata/sync/fuzz000/fuzzer_stats'] # invalid (non-crashing) sample
target_cmd = 'python testdata/dummy_process/dummyproc.py'
timeout_secs = 1
self.assertEqual(([], ['testdata/sync/fuzz000/fuzzer_stats']),
afl_vcrash.verify_samples(num_threads, samples, target_cmd, timeout_secs))
def test_remove_samples(self):
# fail
samples = ['testdata/invalid']
with self.assertRaises(FileNotFoundError):
afl_vcrash.remove_samples(samples, False)
# success
open('testdata/invalid', 'a').close()
self.assertEqual(1, afl_vcrash.remove_samples(samples, False))
def test_build_target_cmd(self):
# fail
target_cmdline = ['/some/path/to/invalid/target/binary', '--some-opt', '--some-other-opt']
with self.assertRaises(SystemExit) as se:
afl_vcrash.build_target_cmd(target_cmdline)
self.assertEqual(2, se.exception.code)
        # success
        target_cmdline = ['testdata/dummy_process/dummyproc.py', '-h', '-l']
self.assertIn('testdata/dummy_process/dummyproc.py -h -l', afl_vcrash.build_target_cmd(target_cmdline))
def test_main(self):
# invalid invocation
with self.assertRaises(SystemExit) as se:
afl_vcrash.main(['afl-vcrash', '--some-invalid-opt'])
self.assertEqual(2, se.exception.code)
# invalid collection dir
with self.assertRaises(SystemExit) as se:
afl_vcrash.main(['afl-vcrash', 'testdata/test_coll', '--', '/usr/bin/ls'])
self.assertEqual(1, se.exception.code)
# prepare sample collection dir
os.mkdir('testdata/test_coll')
open('testdata/test_coll/invalid', 'a').close()
self.assertIsNone(afl_vcrash.main(['afl-vcrash', '-f', 'testdata/vcrash_filelist', 'testdata/test_coll',
'--', '/bin/ls']))
self.assertIs(True, os.path.exists('testdata/vcrash_filelist'))
self.assertIs(True, os.path.exists('testdata/test_coll/invalid'))
self.assertIsNone(afl_vcrash.main(['afl-vcrash', '-r', '-f', 'testdata/vcrash_filelist', 'testdata/test_coll',
'--', '/bin/ls']))
self.assertIs(True, os.path.exists('testdata/vcrash_filelist'))
self.assertIs(False, os.path.exists('testdata/test_coll/invalid'))
| rc0r/afl-utils | tests/test_afl_vcrash.py | Python | apache-2.0 | 3,795 |
__all__ = ['create_subprocess_exec', 'create_subprocess_shell']
import subprocess
from . import events
from . import protocols
from . import streams
from . import tasks
from .coroutines import coroutine
from .log import logger
PIPE = subprocess.PIPE
STDOUT = subprocess.STDOUT
DEVNULL = subprocess.DEVNULL
class SubprocessStreamProtocol(streams.FlowControlMixin,
protocols.SubprocessProtocol):
"""Like StreamReaderProtocol, but for a subprocess."""
def __init__(self, limit, loop):
super().__init__(loop=loop)
self._limit = limit
self.stdin = self.stdout = self.stderr = None
self._transport = None
def __repr__(self):
info = [self.__class__.__name__]
if self.stdin is not None:
info.append('stdin=%r' % self.stdin)
if self.stdout is not None:
info.append('stdout=%r' % self.stdout)
if self.stderr is not None:
info.append('stderr=%r' % self.stderr)
return '<%s>' % ' '.join(info)
def connection_made(self, transport):
self._transport = transport
stdout_transport = transport.get_pipe_transport(1)
if stdout_transport is not None:
self.stdout = streams.StreamReader(limit=self._limit,
loop=self._loop)
self.stdout.set_transport(stdout_transport)
stderr_transport = transport.get_pipe_transport(2)
if stderr_transport is not None:
self.stderr = streams.StreamReader(limit=self._limit,
loop=self._loop)
self.stderr.set_transport(stderr_transport)
stdin_transport = transport.get_pipe_transport(0)
if stdin_transport is not None:
self.stdin = streams.StreamWriter(stdin_transport,
protocol=self,
reader=None,
loop=self._loop)
def pipe_data_received(self, fd, data):
if fd == 1:
reader = self.stdout
elif fd == 2:
reader = self.stderr
else:
reader = None
if reader is not None:
reader.feed_data(data)
def pipe_connection_lost(self, fd, exc):
if fd == 0:
pipe = self.stdin
if pipe is not None:
pipe.close()
self.connection_lost(exc)
return
if fd == 1:
reader = self.stdout
elif fd == 2:
reader = self.stderr
else:
reader = None
        if reader is not None:
if exc is None:
reader.feed_eof()
else:
reader.set_exception(exc)
def process_exited(self):
self._transport.close()
self._transport = None
class Process:
def __init__(self, transport, protocol, loop):
self._transport = transport
self._protocol = protocol
self._loop = loop
self.stdin = protocol.stdin
self.stdout = protocol.stdout
self.stderr = protocol.stderr
self.pid = transport.get_pid()
def __repr__(self):
return '<%s %s>' % (self.__class__.__name__, self.pid)
@property
def returncode(self):
return self._transport.get_returncode()
@coroutine
def wait(self):
"""Wait until the process exit and return the process return code.
This method is a coroutine."""
return (yield from self._transport._wait())
def send_signal(self, signal):
self._transport.send_signal(signal)
def terminate(self):
self._transport.terminate()
def kill(self):
self._transport.kill()
@coroutine
def _feed_stdin(self, input):
debug = self._loop.get_debug()
self.stdin.write(input)
if debug:
logger.debug('%r communicate: feed stdin (%s bytes)',
self, len(input))
try:
yield from self.stdin.drain()
except (BrokenPipeError, ConnectionResetError) as exc:
# communicate() ignores BrokenPipeError and ConnectionResetError
if debug:
logger.debug('%r communicate: stdin got %r', self, exc)
if debug:
logger.debug('%r communicate: close stdin', self)
self.stdin.close()
@coroutine
def _noop(self):
return None
@coroutine
def _read_stream(self, fd):
transport = self._transport.get_pipe_transport(fd)
if fd == 2:
stream = self.stderr
else:
assert fd == 1
stream = self.stdout
if self._loop.get_debug():
name = 'stdout' if fd == 1 else 'stderr'
logger.debug('%r communicate: read %s', self, name)
output = yield from stream.read()
if self._loop.get_debug():
name = 'stdout' if fd == 1 else 'stderr'
logger.debug('%r communicate: close %s', self, name)
transport.close()
return output
@coroutine
def communicate(self, input=None):
if input:
stdin = self._feed_stdin(input)
else:
stdin = self._noop()
if self.stdout is not None:
stdout = self._read_stream(1)
else:
stdout = self._noop()
if self.stderr is not None:
stderr = self._read_stream(2)
else:
stderr = self._noop()
stdin, stdout, stderr = yield from tasks.gather(stdin, stdout, stderr,
loop=self._loop)
yield from self.wait()
return (stdout, stderr)
@coroutine
def create_subprocess_shell(cmd, stdin=None, stdout=None, stderr=None,
loop=None, limit=streams._DEFAULT_LIMIT, **kwds):
if loop is None:
loop = events.get_event_loop()
protocol_factory = lambda: SubprocessStreamProtocol(limit=limit,
loop=loop)
transport, protocol = yield from loop.subprocess_shell(
protocol_factory,
cmd, stdin=stdin, stdout=stdout,
stderr=stderr, **kwds)
return Process(transport, protocol, loop)
@coroutine
def create_subprocess_exec(program, *args, stdin=None, stdout=None,
stderr=None, loop=None,
limit=streams._DEFAULT_LIMIT, **kwds):
if loop is None:
loop = events.get_event_loop()
protocol_factory = lambda: SubprocessStreamProtocol(limit=limit,
loop=loop)
transport, protocol = yield from loop.subprocess_exec(
protocol_factory,
program, *args,
stdin=stdin, stdout=stdout,
stderr=stderr, **kwds)
return Process(transport, protocol, loop)
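if __name__ == '__main__':
    # Hedged usage sketch, not part of the original module: run a child
    # process and read its stdout. Assumes a POSIX 'echo' binary on PATH
    # and an event loop implementation that supports subprocesses.
    @coroutine
    def _demo():
        proc = yield from create_subprocess_exec('echo', 'hello', stdout=PIPE)
        stdout, _ = yield from proc.communicate()
        return stdout
    loop = events.get_event_loop()
    print(loop.run_until_complete(_demo()))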
| Microvellum/Fluid-Designer | win64-vc/2.78/python/lib/asyncio/subprocess.py | Python | gpl-3.0 | 7,182 |
"""
You have a number of envelopes with widths and heights given as a pair of integers (w, h). One envelope can fit into another if and only if both the width and height of one envelope are greater than the width and height of the other envelope.
What is the maximum number of envelopes you can Russian doll (put one inside another)?
Note:
Rotation is not allowed.
Example:
Input: [[5,4],[6,4],[6,7],[2,3]]
Output: 3
Explanation: The maximum number of envelopes you can Russian doll is 3 ([2,3] => [5,4] => [6,7]).
"""
import bisect
class Solution(object):
def maxEnvelopes(self, envelopes):
"""
:type envelopes: List[List[int]]
:rtype: int
"""
if not envelopes:
return 0
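        # Sort by width ascending; for equal widths sort height descending,
        # so equal-width envelopes cannot chain in the LIS over heights.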
        envelopes.sort(key=lambda x: (x[0], -x[1]))
        h = []
        for e in envelopes:
j = bisect.bisect_left(h, e[1])
if j < len(h):
h[j] = e[1]
else:
h.append(e[1])
return len(h)
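if __name__ == "__main__":
    # Hedged usage sketch, not part of the original solution file; the
    # sample input mirrors the docstring example above.
    assert Solution().maxEnvelopes([[5, 4], [6, 4], [6, 7], [2, 3]]) == 3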
| franklingu/leetcode-solutions | questions/russian-doll-envelopes/Solution.py | Python | mit | 1,024 |
import frappe
def execute():
delivery_notes = frappe.db.get_all(
"Delivery Note", {"status": "Out for Delivery"}, "name")
frappe.reload_doc("stock", "doctype", "delivery_note", force=True)
for delivery_note in delivery_notes:
frappe.db.set_value("Delivery Note", delivery_note.name, "status", "In Transit")
| neilLasrado/erpnext | erpnext/patches/v13_0/update_delivery_note_status_to_in_transit.py | Python | gpl-3.0 | 342 |
# -*- coding: utf-8 -*-
# Generated by Django 1.9.7 on 2017-02-23 22:12
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('tournament', '0145_auto_20170211_1825'),
]
operations = [
migrations.AddField(
model_name='league',
name='description',
field=models.TextField(blank=True),
),
]
| cyanfish/heltour | heltour/tournament/migrations/0146_league_description.py | Python | mit | 414 |
"""Module grouping tests for the pydov.types.boring module."""
from pydov.types.grondwaterfilter import GrondwaterFilter
from pydov.util.dovutil import build_dov_url
from tests.abstract import AbstractTestTypes
location_wfs_getfeature = 'tests/data/types/grondwaterfilter/wfsgetfeature.xml'
location_wfs_feature = 'tests/data/types/grondwaterfilter/feature.xml'
location_dov_xml = 'tests/data/types/grondwaterfilter/grondwaterfilter.xml'
class TestGrondwaterFilter(AbstractTestTypes):
"""Class grouping tests for the
pydov.types.grondwaterfilter.GrondwaterFilter class."""
datatype_class = GrondwaterFilter
namespace = 'http://dov.vlaanderen.be/grondwater/gw_meetnetten'
pkey_base = build_dov_url('data/filter/')
field_names = [
'pkey_filter', 'pkey_grondwaterlocatie', 'gw_id',
'filternummer', 'filtertype', 'x', 'y',
'start_grondwaterlocatie_mtaw', 'mv_mtaw',
'gemeente', 'meetnet_code', 'aquifer_code',
'grondwaterlichaam_code', 'regime',
'diepte_onderkant_filter', 'lengte_filter',
'datum', 'tijdstip', 'peil_mtaw',
'betrouwbaarheid', 'methode', 'filterstatus', 'filtertoestand']
field_names_subtypes = [
'datum', 'tijdstip', 'peil_mtaw', 'betrouwbaarheid',
'methode']
field_names_nosubtypes = [
'pkey_filter', 'pkey_grondwaterlocatie', 'gw_id',
'filternummer', 'filtertype', 'x', 'y',
'start_grondwaterlocatie_mtaw', 'mv_mtaw',
'gemeente', 'meetnet_code', 'aquifer_code',
'grondwaterlichaam_code', 'regime',
'diepte_onderkant_filter', 'lengte_filter']
valid_returnfields = ('pkey_filter', 'meetnet_code')
valid_returnfields_subtype = ('pkey_filter', 'peil_mtaw')
inexistent_field = 'onbestaand'
| DOV-Vlaanderen/pydov | tests/test_types_grondwaterfilter.py | Python | mit | 1,779 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import print_function, division
import sys
try:
import cPickle as pickle
except:
import pickle
from qsrlib.qsrlib import QSRlib, QSRlib_Request_Message
from qsrlib_io.world_trace import Object_State, World_Trace
import argparse
import csv
def pretty_print_world_qsr_trace(which_qsr, qsrlib_response_message):
print(which_qsr, "request was made at ", str(qsrlib_response_message.req_made_at)
+ " and received at " + str(qsrlib_response_message.req_received_at)
+ " and finished at " + str(qsrlib_response_message.req_finished_at))
print("---")
print("Response is:")
for t in qsrlib_response_message.qsrs.get_sorted_timestamps():
foo = str(t) + ": "
        for k, v in qsrlib_response_message.qsrs.trace[t].qsrs.items():
foo += str(k) + ":" + str(v.qsr) + "; "
print(foo)
if __name__ == "__main__":
options = ["rcc2", "rcc3", "rcc8", "cardir", "qtcbs", "qtccs", "qtcbcs", "argd", "argprobd", "mos", "multiple"]
    multiple = options[:]
    multiple.remove("multiple")
    multiple.remove("argd")
    multiple.remove("argprobd")
parser = argparse.ArgumentParser()
parser.add_argument("qsr", help="choose qsr: %s" % options, type=str)
parser.add_argument("-i", "--input", help="file from which to read object states", type=str)
parser.add_argument("--validate", help="validate state chain. Only QTC", action="store_true")
parser.add_argument("--quantisation_factor", help="quantisation factor for 0-states in qtc, or 's'-states in mos", type=float)
parser.add_argument("--no_collapse", help="does not collapse similar adjacent states. Only QTC", action="store_true")
parser.add_argument("--distance_threshold", help="distance threshold for qtcb <-> qtcc transition. Only QTCBC", type=float)
parser.add_argument("-c", "--config", help="config file", type=str)
parser.add_argument("--ros", action="store_true", default=False, help="Use ROS eco-system")
args = parser.parse_args()
if args.qsr in options:
which_qsr = args.qsr
else:
raise ValueError("qsr not found, keywords: %s" % options)
world = World_Trace()
dynamic_args = {}
if which_qsr == "rcc3" or which_qsr == "rcc2":
dynamic_args = {which_qsr: {"quantisation_factor": args.quantisation_factor}}
o1 = [Object_State(name="o1", timestamp=0, x=1., y=1., xsize=5., ysize=8.),
Object_State(name="o1", timestamp=1, x=1., y=2., xsize=5., ysize=8.),
Object_State(name="o1", timestamp=2, x=1., y=3., xsize=5., ysize=8.)]
o2 = [Object_State(name="o2", timestamp=0, x=11., y=1., xsize=5., ysize=8.),
Object_State(name="o2", timestamp=1, x=11., y=2., xsize=5., ysize=8.),
Object_State(name="o2", timestamp=2, x=11., y=3., xsize=5., ysize=8.),
Object_State(name="o2", timestamp=3, x=11., y=4., xsize=5., ysize=8.)]
o3 = [Object_State(name="o3", timestamp=0, x=1., y=11., xsize=5., ysize=8.),
Object_State(name="o3", timestamp=1, x=2., y=11., xsize=5., ysize=8.),
Object_State(name="o3", timestamp=2, x=3., y=11., xsize=5., ysize=8.)]
world.add_object_state_series(o1)
world.add_object_state_series(o2)
world.add_object_state_series(o3)
elif which_qsr == "rcc8":
# dynamic_args = {which_qsr: {"quantisation_factor": args.quantisation_factor}}
o1 = [Object_State(name="o1", timestamp=0, x=1., y=1., xsize=5., ysize=8.),
Object_State(name="o1", timestamp=1, x=1., y=2., xsize=5., ysize=8.),
Object_State(name="o1", timestamp=2, x=1., y=3., xsize=5., ysize=8.)]
o2 = [Object_State(name="o2", timestamp=0, x=11., y=1., xsize=5., ysize=8.),
Object_State(name="o2", timestamp=1, x=11., y=2., xsize=5., ysize=8.),
Object_State(name="o2", timestamp=2, x=11., y=3., xsize=5., ysize=8.),
Object_State(name="o2", timestamp=3, x=11., y=4., xsize=5., ysize=8.)]
o3 = [Object_State(name="o3", timestamp=0, x=1., y=11., xsize=5., ysize=8.),
Object_State(name="o3", timestamp=1, x=2., y=11., xsize=5., ysize=8.),
Object_State(name="o3", timestamp=2, x=3., y=11., xsize=5., ysize=8.)]
o4 = [Object_State(name="o4", timestamp=0, x=1., y=11., xsize=7., ysize=9.),
Object_State(name="o4", timestamp=1, x=2., y=11., xsize=7., ysize=9.),
Object_State(name="o4", timestamp=2, x=3., y=11., xsize=7., ysize=9.)]
world.add_object_state_series(o1)
world.add_object_state_series(o2)
world.add_object_state_series(o3)
world.add_object_state_series(o4)
elif which_qsr == "mos":
dynamic_args = {which_qsr: {"quantisation_factor": args.quantisation_factor}}
o1 = [Object_State(name="o1", timestamp=0, x=1., y=1., xsize=5., ysize=8.),
Object_State(name="o1", timestamp=1, x=2., y=1., xsize=5., ysize=8.),
Object_State(name="o1", timestamp=2, x=3., y=1., xsize=5., ysize=8.)]
o2 = [Object_State(name="o2", timestamp=0, x=11., y=1., xsize=5., ysize=8.),
Object_State(name="o2", timestamp=1, x=11., y=10., xsize=5., ysize=8.),
Object_State(name="o2", timestamp=2, x=11., y=20., xsize=5., ysize=8.),
Object_State(name="o2", timestamp=3, x=11., y=30., xsize=5., ysize=8.)]
world.add_object_state_series(o1)
world.add_object_state_series(o2)
elif which_qsr == "argd" or which_qsr == "argprobd":
qsr_relations_and_values = {"0": 5., "1": 15., "2": 100.} if which_qsr == "argd" else {"0": (2.5,2.5/2), "1": (7.5,7.5/2), "2": [50,50/2]}
dynamic_args = {which_qsr: {"qsr_relations_and_values": qsr_relations_and_values}}
o1 = [Object_State(name="o1", timestamp=0, x=1., y=1., xsize=5., ysize=8.),
Object_State(name="o1", timestamp=1, x=1., y=2., xsize=5., ysize=8.),
Object_State(name="o1", timestamp=2, x=1., y=2., xsize=5., ysize=8.)]
o2 = [Object_State(name="o2", timestamp=0, x=1., y=1., xsize=5., ysize=8.),
Object_State(name="o2", timestamp=1, x=11., y=2., xsize=5., ysize=8.),
Object_State(name="o2", timestamp=2, x=21., y=2., xsize=5., ysize=8.)]
o3 = [Object_State(name="o3", timestamp=0, x=21., y=1., xsize=5., ysize=8.),
Object_State(name="o3", timestamp=1, x=31., y=2., xsize=5., ysize=8.),
Object_State(name="o3", timestamp=2, x=41., y=2., xsize=5., ysize=8.)]
world.add_object_state_series(o1)
world.add_object_state_series(o2)
world.add_object_state_series(o3)
elif which_qsr == "cardir":
o1 = [Object_State(name="o1", timestamp=0, x=5., y=5., xsize=2., ysize=2.),
Object_State(name="o1", timestamp=1, x=5., y=5., xsize=2., ysize=2.),
Object_State(name="o1", timestamp=2, x=5., y=5., xsize=2., ysize=2.)]
o2 = [Object_State(name="o2", timestamp=0, x=8., y=8., xsize=2., ysize=2.),
Object_State(name="o2", timestamp=1, x=6., y=8., xsize=2., ysize=2.),
Object_State(name="o2", timestamp=2, x=4., y=8., xsize=2., ysize=2.),
Object_State(name="o2", timestamp=3, x=2., y=8., xsize=2., ysize=2.)]
o3 = [Object_State(name="o3", timestamp=0, x=3., y=3., xsize=2., ysize=2.),
Object_State(name="o3", timestamp=1, x=4., y=3., xsize=2., ysize=2.),
Object_State(name="o3", timestamp=2, x=6., y=3., xsize=2., ysize=2.)]
o4 = [Object_State(name="o4", timestamp=0, x=4., y=11., xsize=7., ysize=9.),
Object_State(name="o4", timestamp=1, x=6., y=11., xsize=7., ysize=9.),
Object_State(name="o4", timestamp=2, x=8., y=11., xsize=7., ysize=9.)]
world.add_object_state_series(o1)
world.add_object_state_series(o2)
world.add_object_state_series(o3)
world.add_object_state_series(o4)
elif which_qsr == "qtcbs":
dynamic_args = {which_qsr: {
"quantisation_factor": args.quantisation_factor,
"validate": args.validate,
"no_collapse": args.no_collapse
}}
if args.input:
ob = []
with open(args.input) as csvfile:
reader = csv.DictReader(csvfile)
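                # Each CSV row is expected to provide the columns
                # agent1, x1, y1, agent2, x2, y2 (consumed by the loop below).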
print("Reading file '%s':" % args.input)
for idx,row in enumerate(reader):
ob.append(Object_State(
name=row['agent1'],
timestamp=idx,
x=float(row['x1']),
y=float(row['y1'])
))
ob.append(Object_State(
name=row['agent2'],
timestamp=idx,
x=float(row['x2']),
y=float(row['y2'])
))
world.add_object_state_series(ob)
else:
o1 = [Object_State(name="o1", timestamp=0, x=1., y=1.),
Object_State(name="o1", timestamp=1, x=2., y=1.),
Object_State(name="o1", timestamp=2, x=1., y=1.)]
o2 = [Object_State(name="o2", timestamp=0, x=4., y=1.),
Object_State(name="o2", timestamp=1, x=4., y=1.),
Object_State(name="o2", timestamp=2, x=5., y=1.)]
o3 = [Object_State(name="o3", timestamp=0, x=4., y=1.),
Object_State(name="o3", timestamp=1, x=4., y=1.),
Object_State(name="o3", timestamp=2, x=5., y=1.)]
o4 = [Object_State(name="o4", timestamp=0, x=14., y=11.),
Object_State(name="o4", timestamp=1, x=14., y=11.)]
world.add_object_state_series(o1)
world.add_object_state_series(o2)
world.add_object_state_series(o3)
# world.add_object_state_series(o4) # test for missing values
elif which_qsr == "qtccs":
dynamic_args = {which_qsr: {
"quantisation_factor": args.quantisation_factor,
"validate": args.validate,
"no_collapse": args.no_collapse
}}
if args.input:
ob = []
with open(args.input) as csvfile:
reader = csv.DictReader(csvfile)
print("Reading file '%s':" % args.input)
for idx,row in enumerate(reader):
ob.append(Object_State(
name=row['agent1'],
timestamp=idx,
x=float(row['x1']),
y=float(row['y1'])
))
ob.append(Object_State(
name=row['agent2'],
timestamp=idx,
x=float(row['x2']),
y=float(row['y2'])
))
world.add_object_state_series(ob)
else:
o1 = [Object_State(name="o1", timestamp=0, x=1., y=1.),
Object_State(name="o1", timestamp=1, x=2., y=2.),
Object_State(name="o1", timestamp=2, x=1., y=2.)]
o2 = [Object_State(name="o2", timestamp=0, x=4., y=1.),
Object_State(name="o2", timestamp=1, x=4., y=1.),
Object_State(name="o2", timestamp=2, x=5., y=1.)]
o4 = [Object_State(name="o4", timestamp=0, x=14., y=11.),
Object_State(name="o4", timestamp=1, x=14., y=11.)]
world.add_object_state_series(o1)
world.add_object_state_series(o2)
# world.add_object_state_series(o4) # test for missing values
elif which_qsr == "qtcbcs":
dynamic_args = {which_qsr: {
"quantisation_factor": args.quantisation_factor,
"distance_threshold": args.distance_threshold,
"validate": args.validate,
"no_collapse": args.no_collapse
}}
if args.input:
ob = []
with open(args.input) as csvfile:
reader = csv.DictReader(csvfile)
print("Reading file '%s':" % args.input)
for idx,row in enumerate(reader):
ob.append(Object_State(
name=row['agent1'],
timestamp=idx,
x=float(row['x1']),
y=float(row['y1'])
))
ob.append(Object_State(
name=row['agent2'],
timestamp=idx,
x=float(row['x2']),
y=float(row['y2'])
))
world.add_object_state_series(ob)
else:
o1 = [Object_State(name="o1", timestamp=0, x=1., y=1.),
Object_State(name="o1", timestamp=1, x=2., y=2.),
Object_State(name="o1", timestamp=2, x=1., y=2.)]
o2 = [Object_State(name="o2", timestamp=0, x=4., y=1.),
Object_State(name="o2", timestamp=1, x=4., y=1.),
Object_State(name="o2", timestamp=2, x=5., y=1.)]
o4 = [Object_State(name="o4", timestamp=0, x=14., y=11.),
Object_State(name="o4", timestamp=1, x=14., y=11.)]
world.add_object_state_series(o1)
world.add_object_state_series(o2)
# world.add_object_state_series(o4) # test for missing values
elif which_qsr == "multiple":
which_qsr = multiple
traj = [Object_State(name="traj", timestamp=0, x=1., y=1., xsize=5., ysize=8.),
Object_State(name="traj", timestamp=1, x=1., y=2., xsize=5., ysize=8.)]
o1 = [Object_State(name="o1", timestamp=0, x=11., y=1., xsize=5., ysize=8.),
Object_State(name="o1", timestamp=1, x=11., y=2., xsize=5., ysize=8.)]
o2 = [Object_State(name="o2", timestamp=0, x=11., y=1., xsize=5., ysize=8.),
Object_State(name="o2", timestamp=1, x=11., y=2., xsize=5., ysize=8.)]
world.add_object_state_series(traj)
world.add_object_state_series(o1)
# # DBG: testing qsrs_for
# try:
# dynamic_args[which_qsr]["qsrs_for"] = [("o1", "o2"), ("o1", "o3")]
# except KeyError:
# dynamic_args[which_qsr] = {"qsrs_for": [("o1", "o3"), ("o1", "o3")]}
# try:
# dynamic_args[which_qsr]["qsrs_for"] = ["o1"]
# except KeyError:
# dynamic_args[which_qsr] = {"qsrs_for": ["o1"]}
# dynamic_args["for_all_qsrs"] = {"qsrs_for": [("o1", "o2"), "o2"]}
# try:
# print(dynamic_args[which_qsr]["qsrs_for"])
# except KeyError:
# print("qsrs_for not set in which_qsr namespace")
# print(dynamic_args["for_all_qsrs"]["qsrs_for"])
# # DBG: eof
qsrlib_request_message = QSRlib_Request_Message(which_qsr=which_qsr, input_data=world, dynamic_args=dynamic_args)
if args.ros:
try:
import rospy
from qsrlib_ros.qsrlib_ros_client import QSRlib_ROS_Client
except ImportError:
raise ImportError("ROS not found")
client_node = rospy.init_node("qsr_lib_ros_client_example")
cln = QSRlib_ROS_Client()
req = cln.make_ros_request_message(qsrlib_request_message)
res = cln.request_qsrs(req)
qsrlib_response_message = pickle.loads(res.data)
else:
qsrlib = QSRlib()
qsrlib_response_message = qsrlib.request_qsrs(req_msg=qsrlib_request_message)
pretty_print_world_qsr_trace(which_qsr, qsrlib_response_message)
| yianni/rtd-dbg | qsr_lib/scripts/example_extended.py | Python | mit | 15,681 |
import numpy
from .namedFilter import namedFilter
from .upConv import upConv
def upBlur(*args):
''' RES = upBlur(IM, LEVELS, FILT)
Upsample and blur an image. The blurring is done with filter
kernel specified by FILT (default = 'binom5'), which can be a string
(to be passed to namedFilter), a vector (applied separably as a 1D
convolution kernel in X and Y), or a matrix (applied as a 2D
    convolution kernel). The upsampling is always by 2 in each
direction.
The procedure is applied recursively LEVELS times (default=1).
Eero Simoncelli, 4/97. Python port by Rob Young, 10/15. '''
#---------------------------------------------------------------
# REQUIRED ARGS
    if len(args) == 0:
        print('Usage: upBlur(image, levels, filter)')
        print('first argument is required')
        return
else:
im = numpy.array(args[0])
#---------------------------------------------------------------
# OPTIONAL ARGS
if len(args) > 1:
nlevs = args[1]
else:
nlevs = 1
if len(args) > 2:
filt = args[2]
else:
filt = 'binom5'
#------------------------------------------------------------------
if isinstance(filt, str):
filt = namedFilter(filt)
if nlevs > 1:
im = upBlur(im, nlevs - 1, filt)
if nlevs >= 1:
if im.shape[0] == 1 or im.shape[1] == 1:
if im.shape[0] == 1:
filt = filt.reshape(filt.shape[1], filt.shape[0])
start = (1, 2)
else:
start = (2, 1)
res = upConv(im, filt, 'reflect1', start)
elif filt.shape[0] == 1 or filt.shape[1] == 1:
if filt.shape[0] == 1:
filt = filt.reshape(filt.shape[1], 1)
res = upConv(im, filt, 'reflect1', [2, 1])
res = upConv(res, filt.T, 'reflect1', [1, 2])
else:
res = upConv(im, filt, 'reflect1', [2, 2])
else:
res = im
return res
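# Hedged usage sketch (assumes this package is importable and 'im' is a
# 2D numpy array; the filter names come from namedFilter):
#   blurred = upBlur(im)              # one level, default 'binom5' kernel
#   blurred = upBlur(im, 2, 'qmf9')   # upsample twice with a QMF kernel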
| tochikuji/pyPyrTools | pyrtools/upBlur.py | Python | mit | 2,030 |
#! /usr/bin/env python
from mock import patch
from unittest import TestCase
import pandas as pd
from pandashells.bin.p_cdf import main
class MainTests(TestCase):
@patch(
'pandashells.bin.p_cdf.sys.argv',
'p.cdf -c x -q -n 10'.split())
@patch('pandashells.bin.p_cdf.io_lib.df_to_output')
@patch('pandashells.bin.p_cdf.io_lib.df_from_input')
def test_cli_quiet(self, df_from_input_mock, df_to_output_mock):
df_in = pd.DataFrame({
'x': range(1, 101)
})
df_from_input_mock.return_value = df_in
main()
df_out = df_to_output_mock.call_args_list[0][0][1]
self.assertEqual(list(df_out.columns), ['x', 'p_less', 'p_greater'])
self.assertEqual(len(df_out), 10)
@patch(
'pandashells.bin.p_cdf.sys.argv',
'p.cdf -c x -n 10'.split())
@patch('pandashells.bin.p_cdf.plot_lib.show')
@patch('pandashells.bin.p_cdf.io_lib.df_from_input')
def test_cli(self, df_from_input_mock, show_mock):
df_in = pd.DataFrame({
'x': range(1, 101)
})
df_from_input_mock.return_value = df_in
main()
self.assertTrue(show_mock.called)
| moreati/pandashells | pandashells/test/p_cdf_tests.py | Python | bsd-2-clause | 1,185 |
__author__ = 'vsharman'
| andrewgee/service-manager | test/__init__.py | Python | apache-2.0 | 24 |
#!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "HW.settings")
try:
from django.core.management import execute_from_command_line
except ImportError as exc:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
) from exc
execute_from_command_line(sys.argv)
| zhu913104/KMdriod | HW/manage.py | Python | mit | 534 |
import logging
import logging.config
import os
# Google App Engine imports.
from google.appengine.ext.webapp import util
# Force Django to reload its settings.
from django.conf import settings
settings._target = None
# Must set this env var before importing any part of Django
os.environ['DJANGO_SETTINGS_MODULE'] = 'settings'
import django.core.handlers.wsgi
import django.core.signals
#import django.db
import django.dispatch.dispatcher
def log_exception(*args, **kwds):
logging.exception('Exception in request:')
# Log errors.
django.dispatch.dispatcher.connect(
log_exception, django.core.signals.got_request_exception)
# Unregister the rollback event handler.
#django.dispatch.dispatcher.disconnect(
# django.db._rollback_on_exception,
# django.core.signals.got_request_exception)
def real_main():
#logging.config.fileConfig("1logging.conf")
root=logging.getLogger("root")
root.setLevel(logging.WARNING)
logger=logging.getLogger("smartSweeper")
logger.setLevel(logging.DEBUG)
#logger.propagate=0
# Create a Django application for WSGI.
application = django.core.handlers.wsgi.WSGIHandler()
# Run the WSGI CGI handler with that application.
util.run_wsgi_app(application)
def profile_main():
# This is the main function for profiling
# We've renamed our original main() above to real_main()
import cProfile, pstats, StringIO
prof = cProfile.Profile()
prof = prof.runctx("real_main()", globals(), locals())
stream = StringIO.StringIO()
stats = pstats.Stats(prof, stream=stream)
stats.sort_stats("time") # Or cumulative
stats.print_stats(80) # 80 = how many to print
# The rest is optional.
# stats.print_callees()
# stats.print_callers()
logging.info("Profile data:\n%s", stream.getvalue())
if __name__ == '__main__':
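    # Swap in profile_main() here instead of real_main() to log
    # per-request profiling data via cProfile.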
real_main() | skibaa/smart-sweeper | main.py | Python | apache-2.0 | 1,792 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from flask import Flask
from flask import render_template
from pattern.web import Newsfeed, plaintext
from alchemyapi import AlchemyAPI
app = Flask(__name__)
reader = Newsfeed()
alchemyapi = AlchemyAPI()
RSS_LIST = [
(u"Lifehacker", "http://feeds.gawker.com/lifehacker/vip"),
(u"The Verge", "http://www.theverge.com/rss/index.xml"),
(u"Naukas", "http://naukas.com/feed/"),
(u"Zen Habits", "http://feeds.feedburner.com/zenhabits?format=xml"),
(u"Yuri", "http://www.lapizarradeyuri.com/feed/"),
(u"Menéame", "http://www.meneame.net/rss")
]
items = []
for feed in RSS_LIST:
feedlist = []
for result in reader.search(feed[1])[:10]:
clean_text = plaintext(result.text)
response = alchemyapi.entities("text", result.text)
entities = []
for entity in response["entities"]:
        if "disambiguated" in entity:
dbpedia_uri = entity["disambiguated"]["dbpedia"]
else:
dbpedia_uri = None
entities.append((entity["text"], dbpedia_uri))
feedlist.append(dict(title=result.title, url=result.url, text=clean_text, entities=entities))
items.append(dict(site=feed[0], feedlist=feedlist))
@app.route('/')
def index():
return render_template("index.html", items=items)
if __name__ == "__main__":
app.run(host="0.0.0.0", debug=True)
| vitojph/myrss | myrss.py | Python | gpl-2.0 | 1,343 |
from distutils.core import setup
setup(name='pySpaRSA',
version='1.0',
description='Python implementation of SpaRSA',
author='Eric Jonas',
author_email='jonas@ericjonas.com',
url='http://github.com/ericmjonas/pysparsa/',
packages=['pysparsa'],
)
| ericmjonas/pySpaRSA | setup.py | Python | mit | 288 |
# -*- coding: utf-8 -*-
import json
import urllib
import urllib2
from django.contrib.auth.models import User
from django.core.exceptions import MultipleObjectsReturned
from django.http import Http404
from django.shortcuts import render
from django.views.generic import View
from django.views.generic.detail import SingleObjectMixin
from actstream.models import following
from social.apps.django_app.default.models import UserSocialAuth
from userspace.models import UserProfile
class CivilUserMixin(SingleObjectMixin):
""" Provides common context for user activities views.
"""
model = UserProfile
slug_field = "clean_username"
slug_url_kwarg = "username"
context_object_name = "profile"
template_name = "activities/followed_user_list.html"
def dispatch(self, *args, **kwargs):
self.object = self.get_object()
return super(CivilUserMixin, self).dispatch(*args, **kwargs)
class FollowedUserList(CivilUserMixin, View):
""" Presents list of all users followed by given user.
"""
def get_context_data(self):
context = super(FollowedUserList, self).get_context_data()
context.update({
'object_list': [x for x in following(self.object.user, User)\
if x is not None], })
return context
def get(self, request, **kwargs):
return render(request, self.template_name, self.get_context_data())
class FacebookFriendList(CivilUserMixin, View):
""" List of all facebook friends of currently logged in user.
"""
def get_context_data(self):
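        # Ask the Facebook Graph API for the ids of the user's friends and
        # match them against locally stored facebook social auth entries.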
url = 'https://graph.facebook.com/{}/friends?{}'
params = urllib.urlencode({
'access_token': self.s_auth.extra_data['access_token'],
'fields': ['id', ], })
res = urllib2.urlopen(url.format(self.s_auth.extra_data['id'], params))
id_list = [x['id'] for x in json.loads(res.read())['data']]
context = super(FacebookFriendList, self).get_context_data()
fb_users = UserSocialAuth.objects.filter(provider='facebook')
friends = [x.user for x in fb_users if x.extra_data['id'] in id_list]
context.update({'object_list': friends, 'fb_list': True, })
return context
def get(self, request, **kwargs):
user = self.request.user
if user.is_anonymous() or user != self.object.user:
raise Http404
try:
self.s_auth = self.object.user.social_auth.get(provider='facebook')
except MultipleObjectsReturned:
self.s_auth = self.object.user.social_auth.filter(provider='facebook').first()
except UserSocialAuth.DoesNotExist:
request.session['relogin'] = json.dumps({
'backend': 'facebook',
'next_url': request.path, })
return render(request, 'userspace/fb-login-required.html', {
'profile': self.request.user.profile, })
return render(request, self.template_name, self.get_context_data())
| CivilHub/CivilHub | activities/views.py | Python | gpl-3.0 | 3,027 |
# members.urls
# URLs for routing the members app.
#
# Author: Benjamin Bengfort <bbengfort@districtdatalabs.com>
# Created: Fri Feb 12 23:30:10 2016 -0500
#
# Copyright (C) 2016 District Data Labs
# For license information, see LICENSE.txt
#
# ID: urls.py [d011c91] benjamin@bengfort.com $
"""
URLs for routing the members app.
"""
##########################################################################
## Imports
##########################################################################
from django.conf.urls import url
from members.views import *
##########################################################################
## URL Patterns
##########################################################################
urlpatterns = (
url(r'^members/$', MemberListView.as_view(), name='list'),
url(r'^(?P<slug>[\w\.\-]+)/$', MemberView.as_view(), name='detail'),
)
| DistrictDataLabs/partisan-discourse | members/urls.py | Python | apache-2.0 | 883 |
#!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "sqrtrading.settings")
try:
from django.core.management import execute_from_command_line
except ImportError:
# The above import may fail for some other reason. Ensure that the
# issue is really that Django is missing to avoid masking other
# exceptions on Python 2.
try:
import django
except ImportError:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
)
raise
execute_from_command_line(sys.argv)
| alexbid/sqrtrading | manage.py | Python | mit | 808 |
"""Support for Toon thermostat."""
import logging
from typing import Any, Dict, List, Optional
from homeassistant.components.climate import ClimateDevice
from homeassistant.components.climate.const import (
CURRENT_HVAC_HEAT,
CURRENT_HVAC_IDLE,
HVAC_MODE_HEAT,
PRESET_AWAY,
PRESET_COMFORT,
PRESET_HOME,
PRESET_SLEEP,
SUPPORT_PRESET_MODE,
SUPPORT_TARGET_TEMPERATURE,
)
from homeassistant.config_entries import ConfigEntry
from homeassistant.const import ATTR_TEMPERATURE, TEMP_CELSIUS
from homeassistant.helpers.typing import HomeAssistantType
from . import ToonData, ToonDisplayDeviceEntity
from .const import (
DATA_TOON,
DATA_TOON_CLIENT,
DEFAULT_MAX_TEMP,
DEFAULT_MIN_TEMP,
DOMAIN,
)
_LOGGER = logging.getLogger(__name__)
SUPPORT_FLAGS = SUPPORT_TARGET_TEMPERATURE | SUPPORT_PRESET_MODE
SUPPORT_PRESET = [PRESET_AWAY, PRESET_COMFORT, PRESET_HOME, PRESET_SLEEP]
async def async_setup_entry(
hass: HomeAssistantType, entry: ConfigEntry, async_add_entities
) -> None:
"""Set up a Toon binary sensors based on a config entry."""
toon_client = hass.data[DATA_TOON_CLIENT][entry.entry_id]
toon_data = hass.data[DATA_TOON][entry.entry_id]
async_add_entities([ToonThermostatDevice(toon_client, toon_data)], True)
class ToonThermostatDevice(ToonDisplayDeviceEntity, ClimateDevice):
"""Representation of a Toon climate device."""
def __init__(self, toon_client, toon_data: ToonData) -> None:
"""Initialize the Toon climate device."""
self._client = toon_client
self._current_temperature = None
self._target_temperature = None
self._heating = False
self._next_target_temperature = None
self._preset = None
self._heating_type = None
super().__init__(toon_data, "Toon Thermostat", "mdi:thermostat")
@property
def unique_id(self) -> str:
"""Return the unique ID for this thermostat."""
return "_".join([DOMAIN, self.toon.agreement.id, "climate"])
@property
def supported_features(self) -> int:
"""Return the list of supported features."""
return SUPPORT_FLAGS
@property
def hvac_mode(self) -> str:
"""Return hvac operation ie. heat, cool mode."""
return HVAC_MODE_HEAT
@property
def hvac_modes(self) -> List[str]:
"""Return the list of available hvac operation modes."""
return [HVAC_MODE_HEAT]
@property
def hvac_action(self) -> Optional[str]:
"""Return the current running hvac operation."""
if self._heating:
return CURRENT_HVAC_HEAT
return CURRENT_HVAC_IDLE
@property
def temperature_unit(self) -> str:
"""Return the unit of measurement."""
return TEMP_CELSIUS
@property
def preset_mode(self) -> Optional[str]:
"""Return the current preset mode, e.g., home, away, temp."""
if self._preset is not None:
return self._preset.lower()
return None
@property
def preset_modes(self) -> List[str]:
"""Return a list of available preset modes."""
return SUPPORT_PRESET
@property
def current_temperature(self) -> Optional[float]:
"""Return the current temperature."""
return self._current_temperature
@property
def target_temperature(self) -> Optional[float]:
"""Return the temperature we try to reach."""
return self._target_temperature
@property
def min_temp(self) -> float:
"""Return the minimum temperature."""
return DEFAULT_MIN_TEMP
@property
def max_temp(self) -> float:
"""Return the maximum temperature."""
return DEFAULT_MAX_TEMP
@property
def device_state_attributes(self) -> Dict[str, Any]:
"""Return the current state of the burner."""
return {"heating_type": self._heating_type}
def set_temperature(self, **kwargs) -> None:
"""Change the setpoint of the thermostat."""
temperature = kwargs.get(ATTR_TEMPERATURE)
self._client.thermostat = self._target_temperature = temperature
self.schedule_update_ha_state()
def set_preset_mode(self, preset_mode: str) -> None:
"""Set new preset mode."""
self._client.thermostat_state = self._preset = preset_mode
self.schedule_update_ha_state()
def set_hvac_mode(self, hvac_mode: str) -> None:
"""Set new target hvac mode."""
pass
def update(self) -> None:
"""Update local state."""
if self.toon.thermostat_state is None:
self._preset = None
else:
self._preset = self.toon.thermostat_state.name
self._current_temperature = self.toon.temperature
self._target_temperature = self.toon.thermostat
self._heating_type = self.toon.agreement.heating_type
self._heating = self.toon.thermostat_info.burner_info == 1
| leppa/home-assistant | homeassistant/components/toon/climate.py | Python | apache-2.0 | 4,943 |
# -*- coding: utf-8 -*-
__author__ = 'rldotai'
__email__ = 'rldot41@gmail.com'
__version__ = '0.1.0'
| rldotai/rlbench | rlbench/__init__.py | Python | gpl-3.0 | 102 |
"""
rendering.py
--------------
Functions to convert trimesh objects to pyglet/opengl objects.
"""
import numpy as np
from . import util
# avoid importing pyglet or pyglet.gl
# as pyglet does things on import
GL_POINTS, GL_LINES, GL_TRIANGLES = (0, 1, 4)
def convert_to_vertexlist(geometry, **kwargs):
"""
Try to convert various geometry objects to the constructor
args for a pyglet indexed vertex list.
Parameters
------------
obj : Trimesh, Path2D, Path3D, (n,2) float, (n,3) float
Object to render
Returns
------------
args : tuple
Args to be passed to pyglet indexed vertex list
constructor.
"""
if util.is_instance_named(geometry, 'Trimesh'):
return mesh_to_vertexlist(geometry, **kwargs)
elif util.is_instance_named(geometry, 'Path'):
# works for Path3D and Path2D
# both of which inherit from Path
return path_to_vertexlist(geometry,
**kwargs)
elif util.is_instance_named(geometry, 'PointCloud'):
# pointcloud objects contain colors
return points_to_vertexlist(geometry.vertices,
colors=geometry.colors,
**kwargs)
elif util.is_instance_named(geometry, 'ndarray'):
# (n,2) or (n,3) points
return points_to_vertexlist(geometry, **kwargs)
elif util.is_instance_named(geometry, 'VoxelGrid'):
# for voxels view them as a bunch of boxes
return mesh_to_vertexlist(geometry.as_boxes(**kwargs),
**kwargs)
else:
raise ValueError('Geometry passed is not a viewable type!')
def mesh_to_vertexlist(mesh,
group=None,
smooth=True,
smooth_threshold=60000):
"""
Convert a Trimesh object to arguments for an
indexed vertex list constructor.
Parameters
-------------
mesh : trimesh.Trimesh
Mesh to be rendered
group : str
Rendering group for the vertex list
smooth : bool
Should we try to smooth shade the mesh
smooth_threshold : int
Maximum number of faces to smooth shade
Returns
--------------
args : (7,) tuple
Args for vertex list constructor
"""
if hasattr(mesh.visual, 'uv'):
# if the mesh has texture defined pass it to pyglet
vertex_count = len(mesh.vertices)
normals = mesh.vertex_normals.reshape(-1).tolist()
faces = mesh.faces.reshape(-1).tolist()
vertices = mesh.vertices.reshape(-1).tolist()
# get the per-vertex UV coordinates
uv = mesh.visual.uv
# does the material actually have an image specified
no_image = (hasattr(mesh.visual.material, 'image') and
mesh.visual.material.image is None)
if uv is None or no_image:
# if no UV coordinates on material, just set face colors
# to the diffuse color of the material
color_gl = colors_to_gl(mesh.visual.material.main_color,
vertex_count)
else:
            # if someone passed (n, 3) UV coordinates, drop the third column
if uv.shape[1] > 2:
uv = uv[:, :2]
# texcoord as (2,) float
color_gl = ('t2f/static',
uv.astype(np.float64).reshape(-1).tolist())
elif smooth and len(mesh.faces) < smooth_threshold:
# if we have a small number of faces and colors defined
# smooth the mesh by merging vertices of faces below
# the threshold angle
mesh = mesh.smoothed()
vertex_count = len(mesh.vertices)
normals = mesh.vertex_normals.reshape(-1).tolist()
faces = mesh.faces.reshape(-1).tolist()
vertices = mesh.vertices.reshape(-1).tolist()
color_gl = colors_to_gl(mesh.visual.vertex_colors,
vertex_count)
else:
# we don't have textures or want to smooth so
# send a polygon soup of disconnected triangles to opengl
vertex_count = len(mesh.triangles) * 3
normals = np.tile(mesh.face_normals,
(1, 3)).reshape(-1).tolist()
vertices = mesh.triangles.reshape(-1).tolist()
faces = np.arange(vertex_count).tolist()
colors = np.tile(mesh.visual.face_colors,
(1, 3)).reshape((-1, 4))
color_gl = colors_to_gl(colors, vertex_count)
# create the ordered tuple for pyglet, use like:
# `batch.add_indexed(*args)`
args = (vertex_count, # number of vertices
GL_TRIANGLES, # mode
group, # group
faces, # indices
('v3f/static', vertices),
('n3f/static', normals),
color_gl)
return args
def path_to_vertexlist(path, group=None, **kwargs):
"""
Convert a Path3D object to arguments for a
pyglet indexed vertex list constructor.
Parameters
-------------
path : trimesh.path.Path3D object
Mesh to be rendered
group : str
Rendering group for the vertex list
Returns
--------------
args : (7,) tuple
Args for vertex list constructor
"""
# avoid cache check inside tight loop
vertices = path.vertices
# get (n, 2, (2|3)) lines
stacked = [util.stack_lines(e.discrete(vertices))
for e in path.entities]
lines = util.vstack_empty(stacked)
count = len(lines)
# stack zeros for 2D lines
if util.is_shape(vertices, (-1, 2)):
lines = lines.reshape((-1, 2))
lines = np.column_stack((lines, np.zeros(len(lines))))
# index for GL is one per point
index = np.arange(count).tolist()
# convert from entity color to the color of
# each vertex in the line segments
vcolor = np.vstack(
[(np.ones((len(s), 4)) * c).astype(np.uint8)
for s, c in zip(stacked, path.colors)])
# convert to gl-friendly colors
gl_colors = colors_to_gl(vcolor, count=count)
# collect args for vertexlist constructor
args = (count, # number of lines
GL_LINES, # mode
group, # group
index, # indices
('v3f/static', lines.reshape(-1)),
gl_colors)
return args
def points_to_vertexlist(points,
colors=None,
group=None,
**kwargs):
"""
Convert a numpy array of 3D points to args for
a vertex list constructor.
Parameters
-------------
points : (n, 3) float
Points to be rendered
colors : (n, 3) or (n, 4) float
Colors for each point
group : str
Rendering group for the vertex list
Returns
--------------
args : (7,) tuple
Args for vertex list constructor
"""
points = np.asanyarray(points, dtype=np.float64)
if util.is_shape(points, (-1, 2)):
points = np.column_stack((points, np.zeros(len(points))))
elif not util.is_shape(points, (-1, 3)):
raise ValueError('Pointcloud must be (n,3)!')
index = np.arange(len(points)).tolist()
args = (len(points), # number of vertices
GL_POINTS, # mode
group, # group
index, # indices
('v3f/static', points.reshape(-1)),
colors_to_gl(colors, len(points)))
return args
def colors_to_gl(colors, count):
"""
Given a list of colors (or None) return a GL-acceptable
list of colors.
Parameters
------------
colors: (count, (3 or 4)) float
Input colors as an array
Returns
---------
colors_type : str
Color type
colors_gl : (count,) list
Colors to pass to pyglet
"""
colors = np.asanyarray(colors)
count = int(count)
# get the GL kind of color we have
colors_dtypes = {'f': 'f',
'i': 'B',
'u': 'B'}
if colors.dtype.kind in colors_dtypes:
dtype = colors_dtypes[colors.dtype.kind]
else:
dtype = None
if dtype is not None and util.is_shape(colors, (count, (3, 4))):
# save the shape and dtype for opengl color string
colors_type = 'c{}{}/static'.format(colors.shape[1], dtype)
# reshape the 2D array into a 1D one and then convert to a python list
gl_colors = colors.reshape(-1).tolist()
elif dtype is not None and colors.shape in [(3,), (4,)]:
# we've been passed a single color so tile them
gl_colors = (np.ones((count, colors.size),
dtype=colors.dtype) * colors).reshape(-1).tolist()
# we know we're tiling
colors_type = 'c{}{}/static'.format(colors.size, dtype)
else:
# case where colors are wrong shape
# use black as the default color
gl_colors = np.tile([0.0, 0.0, 0.0],
(count, 1)).reshape(-1).tolist()
# we're returning RGB float colors
colors_type = 'c3f/static'
return colors_type, gl_colors
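# Illustrative note (not in the original module): a single RGBA uint8 color
# is tiled once per vertex, e.g.
#   colors_to_gl(np.array([255, 0, 0, 255], dtype=np.uint8), 2)
#   -> ('c4B/static', [255, 0, 0, 255, 255, 0, 0, 255])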
def material_to_texture(material, upsize=True):
"""
Convert a trimesh.visual.texture.Material object into
a pyglet-compatible texture object.
Parameters
--------------
material : trimesh.visual.texture.Material
Material to be converted
upsize: bool
If True, will upscale textures to their nearest power
of two resolution to avoid weirdness
Returns
---------------
texture : pyglet.image.Texture
Texture loaded into pyglet form
"""
import pyglet
# try to extract a PIL image from material
if hasattr(material, 'image'):
img = material.image
else:
img = material.baseColorTexture
# if no images in texture return now
if img is None:
return None
# if we're not powers of two upsize
if upsize:
from .visual.texture import power_resize
img = power_resize(img)
# use a PNG export to exchange into pyglet
# probably a way to do this with a PIL converter
with util.BytesIO() as f:
# export PIL image as PNG
img.save(f, format='png')
f.seek(0)
# filename used for format guess
gl_image = pyglet.image.load(filename='.png', file=f)
# turn image into pyglet texture
texture = gl_image.get_texture()
return texture
def matrix_to_gl(matrix):
"""
Convert a numpy row-major homogeneous transformation matrix
to a flat column-major GLfloat transformation.
Parameters
-------------
matrix : (4,4) float
Row-major homogeneous transform
Returns
-------------
glmatrix : (16,) gl.GLfloat
Transform in pyglet format
"""
from pyglet import gl
# convert to GLfloat, switch to column major and flatten to (16,)
return (gl.GLfloat * 16)(*np.asanyarray(
matrix, dtype=np.float32).T.ravel())
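# Illustrative note (not in the original module): after the transpose and
# ravel, the translation column of a homogeneous matrix lands at flat
# indices 12-14 of the returned GLfloat array, as OpenGL expects.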
def vector_to_gl(array, *args):
"""
Convert an array and an optional set of args into a
flat vector of gl.GLfloat
"""
from pyglet import gl
array = np.array(array)
if len(args) > 0:
array = np.append(array, args)
vector = (gl.GLfloat * len(array))(*array)
return vector
def light_to_gl(light, transform, lightN):
"""
Convert trimesh.scene.lighting.Light objects into
args for gl.glLightFv calls
Parameters
--------------
light : trimesh.scene.lighting.Light
Light object to be converted to GL
transform : (4, 4) float
Transformation matrix of light
lightN : int
Result of gl.GL_LIGHT0, gl.GL_LIGHT1, etc
Returns
--------------
multiarg : [tuple]
List of args to pass to gl.glLightFv eg:
[gl.glLightfb(*a) for a in multiarg]
"""
from pyglet import gl
# convert color to opengl
gl_color = vector_to_gl(light.color.astype(np.float64) / 255.0)
assert len(gl_color) == 4
# cartesian translation from matrix
gl_position = vector_to_gl(transform[:3, 3])
# create the different position and color arguments
args = [(lightN, gl.GL_POSITION, gl_position),
(lightN, gl.GL_SPECULAR, gl_color),
(lightN, gl.GL_DIFFUSE, gl_color),
(lightN, gl.GL_AMBIENT, gl_color)]
return args
| dajusc/trimesh | trimesh/rendering.py | Python | mit | 12,335 |
from django.conf.urls import patterns, include, url
from django.contrib import admin
admin.autodiscover()
urlpatterns = patterns('base.views',
url(r'^$', 'home', name='home'),
url(r'^admin/', include(admin.site.urls)),
)
urlpatterns += patterns('blog.views',
(r'^blog/', include('blog.urls'), "main"),
)
| jradd/Django_web_dja | base/urls.py | Python | bsd-3-clause | 321 |
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# DO NOT MODIFY THIS FILE! It was generated by _shared_params_code_gen.py.
from typing import List
from pyspark.ml.param import *
class HasMaxIter(Params):
"""
Mixin for param maxIter: max number of iterations (>= 0).
"""
maxIter: "Param[int]" = Param(
Params._dummy(),
"maxIter",
"max number of iterations (>= 0).",
typeConverter=TypeConverters.toInt,
)
def __init__(self) -> None:
super(HasMaxIter, self).__init__()
def getMaxIter(self) -> int:
"""
Gets the value of maxIter or its default value.
"""
return self.getOrDefault(self.maxIter)
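# Illustrative sketch (not part of the generated file): concrete Params
# classes compose these mixins to inherit typed params, roughly
#   class MyParams(HasMaxIter, HasRegParam):
#       def __init__(self):
#           super(MyParams, self).__init__()
#           self._setDefault(maxIter=100)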
class HasRegParam(Params):
"""
Mixin for param regParam: regularization parameter (>= 0).
"""
regParam: "Param[float]" = Param(
Params._dummy(),
"regParam",
"regularization parameter (>= 0).",
typeConverter=TypeConverters.toFloat,
)
def __init__(self) -> None:
super(HasRegParam, self).__init__()
def getRegParam(self) -> float:
"""
Gets the value of regParam or its default value.
"""
return self.getOrDefault(self.regParam)
class HasFeaturesCol(Params):
"""
Mixin for param featuresCol: features column name.
"""
featuresCol: "Param[str]" = Param(
Params._dummy(),
"featuresCol",
"features column name.",
typeConverter=TypeConverters.toString,
)
def __init__(self) -> None:
super(HasFeaturesCol, self).__init__()
self._setDefault(featuresCol="features")
def getFeaturesCol(self) -> str:
"""
Gets the value of featuresCol or its default value.
"""
return self.getOrDefault(self.featuresCol)
class HasLabelCol(Params):
"""
Mixin for param labelCol: label column name.
"""
labelCol: "Param[str]" = Param(
Params._dummy(),
"labelCol",
"label column name.",
typeConverter=TypeConverters.toString,
)
def __init__(self) -> None:
super(HasLabelCol, self).__init__()
self._setDefault(labelCol="label")
def getLabelCol(self) -> str:
"""
Gets the value of labelCol or its default value.
"""
return self.getOrDefault(self.labelCol)
class HasPredictionCol(Params):
"""
Mixin for param predictionCol: prediction column name.
"""
predictionCol: "Param[str]" = Param(
Params._dummy(),
"predictionCol",
"prediction column name.",
typeConverter=TypeConverters.toString,
)
def __init__(self) -> None:
super(HasPredictionCol, self).__init__()
self._setDefault(predictionCol="prediction")
def getPredictionCol(self) -> str:
"""
Gets the value of predictionCol or its default value.
"""
return self.getOrDefault(self.predictionCol)
class HasProbabilityCol(Params):
"""
Mixin for param probabilityCol: Column name for predicted class conditional probabilities. Note: Not all models output well-calibrated probability estimates! These probabilities should be treated as confidences, not precise probabilities.
"""
probabilityCol: "Param[str]" = Param(
Params._dummy(),
"probabilityCol",
"Column name for predicted class conditional probabilities. Note: Not all models output well-calibrated probability estimates! These probabilities should be treated as confidences, not precise probabilities.",
typeConverter=TypeConverters.toString,
)
def __init__(self) -> None:
super(HasProbabilityCol, self).__init__()
self._setDefault(probabilityCol="probability")
def getProbabilityCol(self) -> str:
"""
Gets the value of probabilityCol or its default value.
"""
return self.getOrDefault(self.probabilityCol)
class HasRawPredictionCol(Params):
"""
Mixin for param rawPredictionCol: raw prediction (a.k.a. confidence) column name.
"""
rawPredictionCol: "Param[str]" = Param(
Params._dummy(),
"rawPredictionCol",
"raw prediction (a.k.a. confidence) column name.",
typeConverter=TypeConverters.toString,
)
def __init__(self) -> None:
super(HasRawPredictionCol, self).__init__()
self._setDefault(rawPredictionCol="rawPrediction")
def getRawPredictionCol(self) -> str:
"""
Gets the value of rawPredictionCol or its default value.
"""
return self.getOrDefault(self.rawPredictionCol)
class HasInputCol(Params):
"""
Mixin for param inputCol: input column name.
"""
inputCol: "Param[str]" = Param(
Params._dummy(),
"inputCol",
"input column name.",
typeConverter=TypeConverters.toString,
)
def __init__(self) -> None:
super(HasInputCol, self).__init__()
def getInputCol(self) -> str:
"""
Gets the value of inputCol or its default value.
"""
return self.getOrDefault(self.inputCol)
class HasInputCols(Params):
"""
Mixin for param inputCols: input column names.
"""
inputCols: "Param[List[str]]" = Param(
Params._dummy(),
"inputCols",
"input column names.",
typeConverter=TypeConverters.toListString,
)
def __init__(self) -> None:
super(HasInputCols, self).__init__()
def getInputCols(self) -> List[str]:
"""
Gets the value of inputCols or its default value.
"""
return self.getOrDefault(self.inputCols)
class HasOutputCol(Params):
"""
Mixin for param outputCol: output column name.
"""
outputCol: "Param[str]" = Param(
Params._dummy(),
"outputCol",
"output column name.",
typeConverter=TypeConverters.toString,
)
def __init__(self) -> None:
super(HasOutputCol, self).__init__()
self._setDefault(outputCol=self.uid + "__output")
def getOutputCol(self) -> str:
"""
Gets the value of outputCol or its default value.
"""
return self.getOrDefault(self.outputCol)
class HasOutputCols(Params):
"""
Mixin for param outputCols: output column names.
"""
outputCols: "Param[List[str]]" = Param(
Params._dummy(),
"outputCols",
"output column names.",
typeConverter=TypeConverters.toListString,
)
def __init__(self) -> None:
super(HasOutputCols, self).__init__()
def getOutputCols(self) -> List[str]:
"""
Gets the value of outputCols or its default value.
"""
return self.getOrDefault(self.outputCols)
class HasNumFeatures(Params):
"""
Mixin for param numFeatures: Number of features. Should be greater than 0.
"""
numFeatures: "Param[int]" = Param(
Params._dummy(),
"numFeatures",
"Number of features. Should be greater than 0.",
typeConverter=TypeConverters.toInt,
)
def __init__(self) -> None:
super(HasNumFeatures, self).__init__()
self._setDefault(numFeatures=262144)
def getNumFeatures(self) -> int:
"""
Gets the value of numFeatures or its default value.
"""
return self.getOrDefault(self.numFeatures)
class HasCheckpointInterval(Params):
"""
Mixin for param checkpointInterval: set checkpoint interval (>= 1) or disable checkpoint (-1). E.g. 10 means that the cache will get checkpointed every 10 iterations. Note: this setting will be ignored if the checkpoint directory is not set in the SparkContext.
"""
checkpointInterval: "Param[int]" = Param(
Params._dummy(),
"checkpointInterval",
"set checkpoint interval (>= 1) or disable checkpoint (-1). E.g. 10 means that the cache will get checkpointed every 10 iterations. Note: this setting will be ignored if the checkpoint directory is not set in the SparkContext.",
typeConverter=TypeConverters.toInt,
)
def __init__(self) -> None:
super(HasCheckpointInterval, self).__init__()
def getCheckpointInterval(self) -> int:
"""
Gets the value of checkpointInterval or its default value.
"""
return self.getOrDefault(self.checkpointInterval)
class HasSeed(Params):
"""
Mixin for param seed: random seed.
"""
seed: "Param[int]" = Param(
Params._dummy(),
"seed",
"random seed.",
typeConverter=TypeConverters.toInt,
)
def __init__(self) -> None:
super(HasSeed, self).__init__()
self._setDefault(seed=hash(type(self).__name__))
def getSeed(self) -> int:
"""
Gets the value of seed or its default value.
"""
return self.getOrDefault(self.seed)
class HasTol(Params):
"""
Mixin for param tol: the convergence tolerance for iterative algorithms (>= 0).
"""
tol: "Param[float]" = Param(
Params._dummy(),
"tol",
"the convergence tolerance for iterative algorithms (>= 0).",
typeConverter=TypeConverters.toFloat,
)
def __init__(self) -> None:
super(HasTol, self).__init__()
def getTol(self) -> float:
"""
Gets the value of tol or its default value.
"""
return self.getOrDefault(self.tol)
class HasRelativeError(Params):
"""
Mixin for param relativeError: the relative target precision for the approximate quantile algorithm. Must be in the range [0, 1]
"""
relativeError: "Param[float]" = Param(
Params._dummy(),
"relativeError",
"the relative target precision for the approximate quantile algorithm. Must be in the range [0, 1]",
typeConverter=TypeConverters.toFloat,
)
def __init__(self) -> None:
super(HasRelativeError, self).__init__()
self._setDefault(relativeError=0.001)
def getRelativeError(self) -> float:
"""
Gets the value of relativeError or its default value.
"""
return self.getOrDefault(self.relativeError)
class HasStepSize(Params):
"""
Mixin for param stepSize: Step size to be used for each iteration of optimization (>= 0).
"""
stepSize: "Param[float]" = Param(
Params._dummy(),
"stepSize",
"Step size to be used for each iteration of optimization (>= 0).",
typeConverter=TypeConverters.toFloat,
)
def __init__(self) -> None:
super(HasStepSize, self).__init__()
def getStepSize(self) -> float:
"""
Gets the value of stepSize or its default value.
"""
return self.getOrDefault(self.stepSize)
class HasHandleInvalid(Params):
"""
Mixin for param handleInvalid: how to handle invalid entries. Options are skip (which will filter out rows with bad values), or error (which will throw an error). More options may be added later.
"""
handleInvalid: "Param[str]" = Param(
Params._dummy(),
"handleInvalid",
"how to handle invalid entries. Options are skip (which will filter out rows with bad values), or error (which will throw an error). More options may be added later.",
typeConverter=TypeConverters.toString,
)
def __init__(self) -> None:
super(HasHandleInvalid, self).__init__()
def getHandleInvalid(self) -> str:
"""
Gets the value of handleInvalid or its default value.
"""
return self.getOrDefault(self.handleInvalid)
class HasElasticNetParam(Params):
"""
Mixin for param elasticNetParam: the ElasticNet mixing parameter, in range [0, 1]. For alpha = 0, the penalty is an L2 penalty. For alpha = 1, it is an L1 penalty.
"""
elasticNetParam: "Param[float]" = Param(
Params._dummy(),
"elasticNetParam",
"the ElasticNet mixing parameter, in range [0, 1]. For alpha = 0, the penalty is an L2 penalty. For alpha = 1, it is an L1 penalty.",
typeConverter=TypeConverters.toFloat,
)
def __init__(self) -> None:
super(HasElasticNetParam, self).__init__()
self._setDefault(elasticNetParam=0.0)
def getElasticNetParam(self) -> float:
"""
Gets the value of elasticNetParam or its default value.
"""
return self.getOrDefault(self.elasticNetParam)
class HasFitIntercept(Params):
"""
Mixin for param fitIntercept: whether to fit an intercept term.
"""
fitIntercept: "Param[bool]" = Param(
Params._dummy(),
"fitIntercept",
"whether to fit an intercept term.",
typeConverter=TypeConverters.toBoolean,
)
def __init__(self) -> None:
super(HasFitIntercept, self).__init__()
self._setDefault(fitIntercept=True)
def getFitIntercept(self) -> bool:
"""
Gets the value of fitIntercept or its default value.
"""
return self.getOrDefault(self.fitIntercept)
class HasStandardization(Params):
"""
Mixin for param standardization: whether to standardize the training features before fitting the model.
"""
standardization: "Param[bool]" = Param(
Params._dummy(),
"standardization",
"whether to standardize the training features before fitting the model.",
typeConverter=TypeConverters.toBoolean,
)
def __init__(self) -> None:
super(HasStandardization, self).__init__()
self._setDefault(standardization=True)
def getStandardization(self) -> bool:
"""
Gets the value of standardization or its default value.
"""
return self.getOrDefault(self.standardization)
class HasThresholds(Params):
"""
Mixin for param thresholds: Thresholds in multi-class classification to adjust the probability of predicting each class. Array must have length equal to the number of classes, with values > 0, excepting that at most one value may be 0. The class with largest value p/t is predicted, where p is the original probability of that class and t is the class's threshold.
"""
thresholds: "Param[List[float]]" = Param(
Params._dummy(),
"thresholds",
"Thresholds in multi-class classification to adjust the probability of predicting each class. Array must have length equal to the number of classes, with values > 0, excepting that at most one value may be 0. The class with largest value p/t is predicted, where p is the original probability of that class and t is the class's threshold.",
typeConverter=TypeConverters.toListFloat,
)
def __init__(self) -> None:
super(HasThresholds, self).__init__()
def getThresholds(self) -> List[float]:
"""
Gets the value of thresholds or its default value.
"""
return self.getOrDefault(self.thresholds)
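# Illustrative sketch (not part of this module): the p/t rule described in
# HasThresholds picks argmax_i p[i] / t[i]. Assuming example values:
#
#   probs = [0.5, 0.3, 0.2]
#   thresholds = [0.6, 0.2, 0.2]
#   ratios = [p / t for p, t in zip(probs, thresholds)]
#   ratios.index(max(ratios))  # -> 1, since 0.3 / 0.2 = 1.5 is the largest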
class HasThreshold(Params):
"""
Mixin for param threshold: threshold in binary classification prediction, in range [0, 1]
"""
threshold: "Param[float]" = Param(
Params._dummy(),
"threshold",
"threshold in binary classification prediction, in range [0, 1]",
typeConverter=TypeConverters.toFloat,
)
def __init__(self) -> None:
super(HasThreshold, self).__init__()
self._setDefault(threshold=0.5)
def getThreshold(self) -> float:
"""
Gets the value of threshold or its default value.
"""
return self.getOrDefault(self.threshold)
class HasWeightCol(Params):
"""
Mixin for param weightCol: weight column name. If this is not set or empty, we treat all instance weights as 1.0.
"""
weightCol: "Param[str]" = Param(
Params._dummy(),
"weightCol",
"weight column name. If this is not set or empty, we treat all instance weights as 1.0.",
typeConverter=TypeConverters.toString,
)
def __init__(self) -> None:
super(HasWeightCol, self).__init__()
def getWeightCol(self) -> str:
"""
Gets the value of weightCol or its default value.
"""
return self.getOrDefault(self.weightCol)
class HasSolver(Params):
"""
Mixin for param solver: the solver algorithm for optimization. If this is not set or empty, default value is 'auto'.
"""
solver: "Param[str]" = Param(
Params._dummy(),
"solver",
"the solver algorithm for optimization. If this is not set or empty, default value is 'auto'.",
typeConverter=TypeConverters.toString,
)
def __init__(self) -> None:
super(HasSolver, self).__init__()
self._setDefault(solver="auto")
def getSolver(self) -> str:
"""
Gets the value of solver or its default value.
"""
return self.getOrDefault(self.solver)
class HasVarianceCol(Params):
"""
Mixin for param varianceCol: column name for the biased sample variance of prediction.
"""
varianceCol: "Param[str]" = Param(
Params._dummy(),
"varianceCol",
"column name for the biased sample variance of prediction.",
typeConverter=TypeConverters.toString,
)
def __init__(self) -> None:
super(HasVarianceCol, self).__init__()
def getVarianceCol(self) -> str:
"""
Gets the value of varianceCol or its default value.
"""
return self.getOrDefault(self.varianceCol)
class HasAggregationDepth(Params):
"""
Mixin for param aggregationDepth: suggested depth for treeAggregate (>= 2).
"""
aggregationDepth: "Param[int]" = Param(
Params._dummy(),
"aggregationDepth",
"suggested depth for treeAggregate (>= 2).",
typeConverter=TypeConverters.toInt,
)
def __init__(self) -> None:
super(HasAggregationDepth, self).__init__()
self._setDefault(aggregationDepth=2)
def getAggregationDepth(self) -> int:
"""
Gets the value of aggregationDepth or its default value.
"""
return self.getOrDefault(self.aggregationDepth)
class HasParallelism(Params):
"""
Mixin for param parallelism: the number of threads to use when running parallel algorithms (>= 1).
"""
parallelism: "Param[int]" = Param(
Params._dummy(),
"parallelism",
"the number of threads to use when running parallel algorithms (>= 1).",
typeConverter=TypeConverters.toInt,
)
def __init__(self) -> None:
super(HasParallelism, self).__init__()
self._setDefault(parallelism=1)
def getParallelism(self) -> int:
"""
Gets the value of parallelism or its default value.
"""
return self.getOrDefault(self.parallelism)
class HasCollectSubModels(Params):
"""
Mixin for param collectSubModels: Param for whether to collect a list of sub-models trained during tuning. If set to false, then only the single best sub-model will be available after fitting. If set to true, then all sub-models will be available. Warning: For large models, collecting all sub-models can cause OOMs on the Spark driver.
"""
collectSubModels: "Param[bool]" = Param(
Params._dummy(),
"collectSubModels",
"Param for whether to collect a list of sub-models trained during tuning. If set to false, then only the single best sub-model will be available after fitting. If set to true, then all sub-models will be available. Warning: For large models, collecting all sub-models can cause OOMs on the Spark driver.",
typeConverter=TypeConverters.toBoolean,
)
def __init__(self) -> None:
super(HasCollectSubModels, self).__init__()
self._setDefault(collectSubModels=False)
def getCollectSubModels(self) -> bool:
"""
Gets the value of collectSubModels or its default value.
"""
return self.getOrDefault(self.collectSubModels)
class HasLoss(Params):
"""
Mixin for param loss: the loss function to be optimized.
"""
loss: "Param[str]" = Param(
Params._dummy(),
"loss",
"the loss function to be optimized.",
typeConverter=TypeConverters.toString,
)
def __init__(self) -> None:
super(HasLoss, self).__init__()
def getLoss(self) -> str:
"""
Gets the value of loss or its default value.
"""
return self.getOrDefault(self.loss)
class HasDistanceMeasure(Params):
"""
Mixin for param distanceMeasure: the distance measure. Supported options: 'euclidean' and 'cosine'.
"""
distanceMeasure: "Param[str]" = Param(
Params._dummy(),
"distanceMeasure",
"the distance measure. Supported options: 'euclidean' and 'cosine'.",
typeConverter=TypeConverters.toString,
)
def __init__(self) -> None:
super(HasDistanceMeasure, self).__init__()
self._setDefault(distanceMeasure="euclidean")
def getDistanceMeasure(self) -> str:
"""
Gets the value of distanceMeasure or its default value.
"""
return self.getOrDefault(self.distanceMeasure)
class HasValidationIndicatorCol(Params):
"""
Mixin for param validationIndicatorCol: name of the column that indicates whether each row is for training or for validation. False indicates training; true indicates validation.
"""
validationIndicatorCol: "Param[str]" = Param(
Params._dummy(),
"validationIndicatorCol",
"name of the column that indicates whether each row is for training or for validation. False indicates training; true indicates validation.",
typeConverter=TypeConverters.toString,
)
def __init__(self) -> None:
super(HasValidationIndicatorCol, self).__init__()
def getValidationIndicatorCol(self) -> str:
"""
Gets the value of validationIndicatorCol or its default value.
"""
return self.getOrDefault(self.validationIndicatorCol)
class HasBlockSize(Params):
"""
Mixin for param blockSize: block size for stacking input data in matrices. Data is stacked within partitions. If block size is more than remaining data in a partition then it is adjusted to the size of this data.
"""
blockSize: "Param[int]" = Param(
Params._dummy(),
"blockSize",
"block size for stacking input data in matrices. Data is stacked within partitions. If block size is more than remaining data in a partition then it is adjusted to the size of this data.",
typeConverter=TypeConverters.toInt,
)
def __init__(self) -> None:
super(HasBlockSize, self).__init__()
def getBlockSize(self) -> int:
"""
Gets the value of blockSize or its default value.
"""
return self.getOrDefault(self.blockSize)
class HasMaxBlockSizeInMB(Params):
"""
Mixin for param maxBlockSizeInMB: maximum memory in MB for stacking input data into blocks. Data is stacked within partitions. If more than remaining data size in a partition then it is adjusted to the data size. Default 0.0 represents choosing optimal value, depends on specific algorithm. Must be >= 0.
"""
maxBlockSizeInMB: "Param[float]" = Param(
Params._dummy(),
"maxBlockSizeInMB",
"maximum memory in MB for stacking input data into blocks. Data is stacked within partitions. If more than remaining data size in a partition then it is adjusted to the data size. Default 0.0 represents choosing optimal value, depends on specific algorithm. Must be >= 0.",
typeConverter=TypeConverters.toFloat,
)
def __init__(self) -> None:
super(HasMaxBlockSizeInMB, self).__init__()
self._setDefault(maxBlockSizeInMB=0.0)
def getMaxBlockSizeInMB(self) -> float:
"""
Gets the value of maxBlockSizeInMB or its default value.
"""
return self.getOrDefault(self.maxBlockSizeInMB)
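# Illustrative sketch (not part of this module): estimators compose these
# mixins and inherit both the Param definitions and the generated getters,
# e.g. (hypothetical class):
#
#   class _DemoParams(HasElasticNetParam, HasFitIntercept, HasSolver):
#       pass
#
#   params = _DemoParams()
#   params.getElasticNetParam()  # -> 0.0, the default set in __init__
#   params.getSolver()           # -> 'auto'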
| holdenk/spark | python/pyspark/ml/param/shared.py | Python | apache-2.0 | 24,706 |
# -*- coding: utf-8 -*-
"""
***************************************************************************
test_qgslineburstsymbollayer.py
---------------------
Date : October 2021
Copyright : (C) 2021 by Nyall Dawson
Email : nyall dot dawson at gmail dot com
***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************
"""
__author__ = 'Nyall Dawson'
__date__ = 'October 2021'
__copyright__ = '(C) 2021, Nyall Dawson'
import qgis # NOQA
from qgis.PyQt.QtCore import QDir, Qt
from qgis.PyQt.QtGui import QImage, QColor, QPainter
from qgis.core import (QgsGeometry,
Qgis,
QgsRenderContext,
QgsFeature,
QgsMapSettings,
QgsRenderChecker,
QgsGradientColorRamp,
QgsGradientStop,
QgsLineSymbol,
QgsLineburstSymbolLayer,
QgsSymbolLayer,
QgsProperty
)
from qgis.testing import unittest, start_app
from utilities import unitTestDataPath
start_app()
TEST_DATA_DIR = unitTestDataPath()
class TestQgsLineburstSymbolLayer(unittest.TestCase):
def setUp(self):
self.report = "<h1>Python QgsLineburstSymbolLayer Tests</h1>\n"
def tearDown(self):
report_file_path = "%s/qgistest.html" % QDir.tempPath()
with open(report_file_path, 'a') as report_file:
report_file.write(self.report)
def testTwoColor(self):
s = QgsLineSymbol()
s.deleteSymbolLayer(0)
line = QgsLineburstSymbolLayer()
line.setColor(QColor(255, 0, 0))
line.setColor2(QColor(0, 255, 0))
line.setWidth(8)
s.appendSymbolLayer(line.clone())
g = QgsGeometry.fromWkt('LineString(0 0, 10 10, 10 0)')
rendered_image = self.renderGeometry(s, g)
self.assertTrue(self.imageCheck('lineburst_two_color', 'lineburst_two_color', rendered_image))
def testDataDefinedColors(self):
s = QgsLineSymbol()
s.deleteSymbolLayer(0)
line = QgsLineburstSymbolLayer()
line.setColor(QColor(255, 0, 0))
line.setColor2(QColor(0, 255, 0))
line.setWidth(8)
line.setDataDefinedProperty(QgsSymbolLayer.PropertyStrokeColor, QgsProperty.fromExpression("'orange'"))
line.setDataDefinedProperty(QgsSymbolLayer.PropertySecondaryColor, QgsProperty.fromExpression("'purple'"))
s.appendSymbolLayer(line.clone())
g = QgsGeometry.fromWkt('LineString(0 0, 10 10, 10 0)')
rendered_image = self.renderGeometry(s, g)
self.assertTrue(self.imageCheck('lineburst_datadefined_color', 'lineburst_datadefined_color', rendered_image))
def testColorRamp(self):
s = QgsLineSymbol()
s.deleteSymbolLayer(0)
line = QgsLineburstSymbolLayer()
line.setGradientColorType(Qgis.GradientColorSource.ColorRamp)
line.setColorRamp(QgsGradientColorRamp(QColor(200, 0, 0), QColor(0, 200, 0), False,
[QgsGradientStop(0.5, QColor(0, 0, 200))]))
line.setWidth(8)
s.appendSymbolLayer(line.clone())
g = QgsGeometry.fromWkt('LineString(0 0, 10 10, 10 0)')
rendered_image = self.renderGeometry(s, g)
self.assertTrue(self.imageCheck('lineburst_colorramp', 'lineburst_colorramp', rendered_image))
def testRenderClosedRing(self):
s = QgsLineSymbol()
s.deleteSymbolLayer(0)
line = QgsLineburstSymbolLayer()
line.setColor(QColor(255, 0, 0))
line.setColor2(QColor(0, 255, 0))
line.setWidth(8)
s.appendSymbolLayer(line.clone())
g = QgsGeometry.fromWkt('LineString(0 0, 10 0, 10 10, 0 10, 0 0)')
rendered_image = self.renderGeometry(s, g)
self.assertTrue(self.imageCheck('lineburst_closed', 'lineburst_closed', rendered_image))
def testRenderFlatCap(self):
s = QgsLineSymbol()
s.deleteSymbolLayer(0)
line = QgsLineburstSymbolLayer()
line.setColor(QColor(255, 0, 0))
line.setColor2(QColor(0, 255, 0))
line.setWidth(8)
line.setPenCapStyle(Qt.FlatCap)
s.appendSymbolLayer(line.clone())
g = QgsGeometry.fromWkt('LineString(0 0, 10 10, 10 0)')
rendered_image = self.renderGeometry(s, g)
self.assertTrue(self.imageCheck('lineburst_flatcap', 'lineburst_flatcap', rendered_image))
def testRenderSquareCap(self):
s = QgsLineSymbol()
s.deleteSymbolLayer(0)
line = QgsLineburstSymbolLayer()
line.setColor(QColor(255, 0, 0))
line.setColor2(QColor(0, 255, 0))
line.setWidth(8)
line.setPenCapStyle(Qt.SquareCap)
s.appendSymbolLayer(line.clone())
g = QgsGeometry.fromWkt('LineString(2 2, 10 10, 10 0)')
rendered_image = self.renderGeometry(s, g)
self.assertTrue(self.imageCheck('lineburst_squarecap', 'lineburst_squarecap', rendered_image))
def testRenderMiterJoin(self):
s = QgsLineSymbol()
s.deleteSymbolLayer(0)
line = QgsLineburstSymbolLayer()
line.setColor(QColor(255, 0, 0))
line.setColor2(QColor(0, 255, 0))
line.setWidth(8)
line.setPenJoinStyle(Qt.MiterJoin)
s.appendSymbolLayer(line.clone())
g = QgsGeometry.fromWkt('LineString(0 15, 0 0, 10 10, 10 0)')
rendered_image = self.renderGeometry(s, g)
self.assertTrue(self.imageCheck('lineburst_miterjoin', 'lineburst_miterjoin', rendered_image))
def testRenderBevelJoin(self):
s = QgsLineSymbol()
s.deleteSymbolLayer(0)
line = QgsLineburstSymbolLayer()
line.setColor(QColor(255, 0, 0))
line.setColor2(QColor(0, 255, 0))
line.setWidth(8)
line.setPenJoinStyle(Qt.BevelJoin)
s.appendSymbolLayer(line.clone())
g = QgsGeometry.fromWkt('LineString(2 2, 10 10, 10 0)')
rendered_image = self.renderGeometry(s, g)
self.assertTrue(self.imageCheck('lineburst_beveljoin', 'lineburst_beveljoin', rendered_image))
def testLineOffset(self):
s = QgsLineSymbol()
s.deleteSymbolLayer(0)
line = QgsLineburstSymbolLayer()
line.setColor(QColor(255, 0, 0))
line.setColor2(QColor(0, 255, 0))
line.setWidth(5)
line.setOffset(5)
s.appendSymbolLayer(line.clone())
g = QgsGeometry.fromWkt('LineString(2 2, 10 10, 10 0)')
rendered_image = self.renderGeometry(s, g)
self.assertTrue(self.imageCheck('lineburst_offset', 'lineburst_offset', rendered_image))
def renderGeometry(self, symbol, geom, buffer=20):
f = QgsFeature()
f.setGeometry(geom)
image = QImage(200, 200, QImage.Format_RGB32)
painter = QPainter()
ms = QgsMapSettings()
extent = geom.get().boundingBox()
        # buffer the extent by 10% of its average dimension ((width + height) / 20 by default)
if extent.width() > 0:
extent = extent.buffered((extent.height() + extent.width()) / buffer)
else:
extent = extent.buffered(buffer / 2)
ms.setExtent(extent)
ms.setOutputSize(image.size())
context = QgsRenderContext.fromMapSettings(ms)
context.setPainter(painter)
context.setScaleFactor(96 / 25.4) # 96 DPI
context.expressionContext().setFeature(f)
painter.begin(image)
try:
image.fill(QColor(0, 0, 0))
symbol.startRender(context)
symbol.renderFeature(f, context)
symbol.stopRender(context)
finally:
painter.end()
return image
def imageCheck(self, name, reference_image, image):
self.report += "<h2>Render {}</h2>\n".format(name)
temp_dir = QDir.tempPath() + '/'
file_name = temp_dir + 'symbol_' + name + ".png"
image.save(file_name, "PNG")
checker = QgsRenderChecker()
checker.setControlPathPrefix("symbol_lineburst")
checker.setControlName("expected_" + reference_image)
checker.setRenderedImage(file_name)
checker.setColorTolerance(2)
result = checker.compareImages(name, 20)
self.report += checker.report()
        print(self.report)
return result
if __name__ == '__main__':
unittest.main()
| uclaros/QGIS | tests/src/python/test_qgslineburstsymbollayer.py | Python | gpl-2.0 | 8,941 |
# flask, html + json
from flask import Flask, render_template, request, jsonify, json
# send http post request
from urllib import urlopen, urlencode
# websocket host
from flask_sockets import Sockets
from gevent import pywsgi
from geventwebsocket.handler import WebSocketHandler
# websocket client
from websocket import create_connection
import message as m
app = Flask(__name__)
sockets = Sockets(app)
@app.route("/")
def selector():
return render_template("selector.html")
@app.route("/rps")
def rps_renderer():
return render_template('rps.html', options={1: 'Rock',
2: 'Paper',
3: 'Scissors'})
@app.route("/rpsls")
def rpsls_renderer():
return render_template('rps.html', options={1: 'Rock',
2: 'Paper',
3: 'Scissors',
4: 'Lizard',
5: 'Spock'})
@app.route("/rest/rps")
def restful_rps_renderer():
return render_template('restful_rps.html', options={1: 'Rock',
2: 'Paper',
3: 'Scissors'})
@app.route("/rest/rpsls")
def restful_rpsls_renderer():
return render_template('restful_rps.html', options={1: 'Rock',
2: 'Paper',
3: 'Scissors',
4: 'Lizard',
5: 'Spock'})
@app.route("/webservice/rps/history")
def rps_history():
history = json.loads(urlopen('http://backend:5000/rps/history').read())
return jsonify(history)
@sockets.route("/ws/rps")
def rps_ws(browser_ws):
backend_ws = create_connection('ws://backend:5000/ws/rps')
while not browser_ws.closed:
m.forward_in(browser_ws, backend_ws)
m.forward_out(backend_ws, browser_ws)
# message = browser_ws.receive()
# if message:
# backend_ws.send(message)
# result = backend_ws.recv()
# if result:
# browser_ws.send(result)
# backend_ws.close()
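    # Hedged sketch (message.py is not shown here): forward_in/forward_out are
    # assumed to relay at most one pending message per call, roughly:
    #
    #   def forward_in(browser_ws, backend_ws):
    #       message = browser_ws.receive()
    #       if message:
    #           backend_ws.send(message)
    #
    # Note the backend connection is never closed; the close() call above is
    # left commented out in the original.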
@app.route("/webservice/rps", methods=["POST"])
def rps_backend():
p1 = request.form['player1']
query_args = {'player1': p1}
data = urlencode(query_args)
response = urlopen('http://backend:5000/rps', data).read()
return response
if __name__ == "__main__":
app.debug = True
app.config['SECRET_KEY'] = 'secret!'
server = pywsgi.WSGIServer(('', 8080),
application=app,
handler_class=WebSocketHandler)
server.serve_forever()
| vagoston/mindtricks | frontend/frontend.py | Python | mit | 2,876 |
import pyaf.Bench.TS_datasets as tsds
import tests.artificial.process_artificial_dataset as art
art.process_dataset(N=1024, FREQ='D', seed=0, trendtype="MovingAverage", cycle_length=0, transform="None", sigma=0.0, exog_count=20, ar_order=0)
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from setuptools import find_packages
from setuptools import setup
import ricecooker
readme = open("README.md").read()
with open("docs/history.rst") as history_file:
history = history_file.read()
requirements = [
"pytest>=3.0.2",
"requests>=2.11.1",
"le_utils>=0.1.26",
"validators", # TODO: check if this is necessary
"requests_file",
"beautifulsoup4>=4.6.3,<4.9.0", # pinned to match versions in le-pycaption
"selenium==3.0.1",
"youtube-dl>=2020.6.16.1",
"html5lib",
"cachecontrol==0.12.0",
"lockfile==0.12.2", # TODO: check if this is necessary
"css-html-js-minify==2.2.2",
"mock==2.0.0",
"pypdf2>=1.26.0",
"dictdiffer>=0.8.0",
"Pillow==8.2.0",
"colorlog>=4.1.0,<4.2",
"PyYAML>=5.3.1",
"Jinja2>=2.10",
"chardet==4.0.0",
"ffmpy>=0.2.2",
"pdf2image==1.11.0",
"le-pycaption>=2.2.0a1",
"EbookLib>=0.17.1",
]
setup(
name="ricecooker",
version=ricecooker.__version__,
description="API for adding content to the Kolibri content curation server",
long_description=readme + "\n\n" + history,
long_description_content_type="text/markdown",
author="Learning Equality",
author_email="dev@learningequality.org",
url="https://github.com/learningequality/ricecooker",
packages=find_packages(),
package_dir={"ricecooker": "ricecooker"},
entry_points={
"console_scripts": [
"corrections = ricecooker.utils.corrections:correctionsmain",
"jiro = ricecooker.cli:main",
]
},
include_package_data=True,
install_requires=requirements,
license="MIT license",
zip_safe=False,
keywords="ricecooker",
classifiers=[
"Intended Audience :: Developers",
"Development Status :: 5 - Production/Stable",
"License :: OSI Approved :: MIT License",
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7",
"Programming Language :: Python :: 3.8",
"Natural Language :: English",
"Topic :: Education",
],
test_suite="tests",
)
| learningequality/ricecooker | setup.py | Python | mit | 2,155 |
#!/usr/bin/env python2
from __future__ import with_statement
__license__ = 'GPL v3'
__copyright__ = '2008, Kovid Goyal <kovid at kovidgoyal.net>'
'''Read meta information from epub files'''
import os, re, posixpath
from cStringIO import StringIO
from contextlib import closing
from future_builtins import map
from calibre.utils.zipfile import ZipFile, BadZipfile, safe_replace
from calibre.utils.localunzip import LocalZipFile
from calibre.ebooks.BeautifulSoup import BeautifulStoneSoup
from calibre.ebooks.metadata import MetaInformation
from calibre.ebooks.metadata.opf2 import OPF
from calibre.ptempfile import TemporaryDirectory, PersistentTemporaryFile
from calibre import CurrentDir, walk
from calibre.constants import isosx
from calibre.utils.localization import lang_as_iso639_1
class EPubException(Exception):
pass
class OCFException(EPubException):
pass
class ContainerException(OCFException):
pass
class Container(dict):
def __init__(self, stream=None):
if not stream:
return
soup = BeautifulStoneSoup(stream.read())
container = soup.find(name=re.compile(r'container$', re.I))
if not container:
raise OCFException("<container> element missing")
if container.get('version', None) != '1.0':
raise EPubException("unsupported version of OCF")
rootfiles = container.find(re.compile(r'rootfiles$', re.I))
if not rootfiles:
raise EPubException("<rootfiles/> element missing")
for rootfile in rootfiles.findAll(re.compile(r'rootfile$', re.I)):
try:
self[rootfile['media-type']] = rootfile['full-path']
except KeyError:
raise EPubException("<rootfile/> element malformed")
class OCF(object):
MIMETYPE = 'application/epub+zip'
CONTAINER_PATH = 'META-INF/container.xml'
ENCRYPTION_PATH = 'META-INF/encryption.xml'
def __init__(self):
raise NotImplementedError('Abstract base class')
class Encryption(object):
OBFUSCATION_ALGORITHMS = frozenset(['http://ns.adobe.com/pdf/enc#RC',
'http://www.idpf.org/2008/embedding'])
def __init__(self, raw):
from lxml import etree
self.root = etree.fromstring(raw) if raw else None
self.entries = {}
if self.root is not None:
for em in self.root.xpath('descendant::*[contains(name(), "EncryptionMethod")]'):
algorithm = em.get('Algorithm', '')
cr = em.getparent().xpath('descendant::*[contains(name(), "CipherReference")]')
if cr:
uri = cr[0].get('URI', '')
if uri and algorithm:
self.entries[uri] = algorithm
def is_encrypted(self, uri):
algo = self.entries.get(uri, None)
return algo is not None and algo not in self.OBFUSCATION_ALGORITHMS
class OCFReader(OCF):
def __init__(self):
try:
mimetype = self.open('mimetype').read().rstrip()
if mimetype != OCF.MIMETYPE:
print 'WARNING: Invalid mimetype declaration', mimetype
except:
print 'WARNING: Epub doesn\'t contain a mimetype declaration'
try:
with closing(self.open(OCF.CONTAINER_PATH)) as f:
self.container = Container(f)
except KeyError:
raise EPubException("missing OCF container.xml file")
self.opf_path = self.container[OPF.MIMETYPE]
if not self.opf_path:
raise EPubException("missing OPF package file entry in container")
try:
with closing(self.open(self.opf_path)) as f:
self.opf = OPF(f, self.root, populate_spine=False)
except KeyError:
raise EPubException("missing OPF package file")
try:
with closing(self.open(self.ENCRYPTION_PATH)) as f:
self.encryption_meta = Encryption(f.read())
except:
self.encryption_meta = Encryption(None)
class OCFZipReader(OCFReader):
def __init__(self, stream, mode='r', root=None):
if isinstance(stream, (LocalZipFile, ZipFile)):
self.archive = stream
else:
try:
self.archive = ZipFile(stream, mode=mode)
except BadZipfile:
raise EPubException("not a ZIP .epub OCF container")
self.root = root
if self.root is None:
name = getattr(stream, 'name', False)
if name:
self.root = os.path.abspath(os.path.dirname(name))
else:
self.root = os.getcwdu()
super(OCFZipReader, self).__init__()
def open(self, name, mode='r'):
if isinstance(self.archive, LocalZipFile):
return self.archive.open(name)
return StringIO(self.archive.read(name))
def get_zip_reader(stream, root=None):
try:
zf = ZipFile(stream, mode='r')
except:
stream.seek(0)
zf = LocalZipFile(stream)
return OCFZipReader(zf, root=root)
class OCFDirReader(OCFReader):
def __init__(self, path):
self.root = path
super(OCFDirReader, self).__init__()
def open(self, path, *args, **kwargs):
return open(os.path.join(self.root, path), *args, **kwargs)
def render_cover(opf, opf_path, zf, reader=None):
from calibre.ebooks import render_html_svg_workaround
from calibre.utils.logging import default_log
cpage = opf.first_spine_item()
if not cpage:
return
if reader is not None and reader.encryption_meta.is_encrypted(cpage):
return
with TemporaryDirectory('_epub_meta') as tdir:
with CurrentDir(tdir):
zf.extractall()
opf_path = opf_path.replace('/', os.sep)
cpage = os.path.join(tdir, os.path.dirname(opf_path), cpage)
if not os.path.exists(cpage):
return
if isosx:
# On OS X trying to render a HTML cover which uses embedded
# fonts more than once in the same process causes a crash in Qt
# so be safe and remove the fonts as well as any @font-face
# rules
for f in walk('.'):
if os.path.splitext(f)[1].lower() in ('.ttf', '.otf'):
os.remove(f)
ffpat = re.compile(br'@font-face.*?{.*?}',
re.DOTALL|re.IGNORECASE)
with open(cpage, 'r+b') as f:
raw = f.read()
f.truncate(0)
f.seek(0)
raw = ffpat.sub(b'', raw)
f.write(raw)
from calibre.ebooks.chardet import xml_to_unicode
raw = xml_to_unicode(raw,
strip_encoding_pats=True, resolve_entities=True)[0]
from lxml import html
for link in html.fromstring(raw).xpath('//link'):
href = link.get('href', '')
if href:
path = os.path.join(os.path.dirname(cpage), href)
if os.path.exists(path):
with open(path, 'r+b') as f:
raw = f.read()
f.truncate(0)
f.seek(0)
raw = ffpat.sub(b'', raw)
f.write(raw)
return render_html_svg_workaround(cpage, default_log)
def get_cover(opf, opf_path, stream, reader=None):
raster_cover = opf.raster_cover
stream.seek(0)
try:
zf = ZipFile(stream)
except:
stream.seek(0)
zf = LocalZipFile(stream)
if raster_cover:
base = posixpath.dirname(opf_path)
cpath = posixpath.normpath(posixpath.join(base, raster_cover))
if reader is not None and \
reader.encryption_meta.is_encrypted(cpath):
return
try:
member = zf.getinfo(cpath)
except:
pass
else:
f = zf.open(member)
data = f.read()
f.close()
zf.close()
return data
return render_cover(opf, opf_path, zf, reader=reader)
def get_metadata(stream, extract_cover=True):
""" Return metadata as a :class:`Metadata` object """
stream.seek(0)
reader = get_zip_reader(stream)
mi = reader.opf.to_book_metadata()
if extract_cover:
try:
cdata = get_cover(reader.opf, reader.opf_path, stream, reader=reader)
if cdata is not None:
mi.cover_data = ('jpg', cdata)
except:
import traceback
traceback.print_exc()
mi.timestamp = None
return mi
def get_quick_metadata(stream):
return get_metadata(stream, False)
def _write_new_cover(new_cdata, cpath):
from calibre.utils.img import save_cover_data_to
new_cover = PersistentTemporaryFile(suffix=os.path.splitext(cpath)[1])
new_cover.close()
save_cover_data_to(new_cdata, new_cover.name)
return new_cover
def normalize_languages(opf_languages, mi_languages):
' Preserve original country codes and use 2-letter lang codes where possible '
from calibre.spell import parse_lang_code
def parse(x):
try:
return parse_lang_code(x)
except ValueError:
return None
opf_languages = filter(None, map(parse, opf_languages))
cc_map = {c.langcode:c.countrycode for c in opf_languages}
mi_languages = filter(None, map(parse, mi_languages))
def norm(x):
lc = x.langcode
cc = x.countrycode or cc_map.get(lc, None)
lc = lang_as_iso639_1(lc) or lc
if cc:
lc += '-' + cc
return lc
return list(map(norm, mi_languages))
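# Worked sketch (assuming parse_lang_code splits 'eng-US' into langcode 'eng'
# and countrycode 'US'): with opf_languages=['eng-US'] and mi_languages=['eng'],
# the country code is preserved and normalize_languages returns ['en-US'].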
def update_metadata(opf, mi, apply_null=False, update_timestamp=False, force_identifiers=False):
for x in ('guide', 'toc', 'manifest', 'spine'):
setattr(mi, x, None)
if mi.languages:
mi.languages = normalize_languages(list(opf.raw_languages) or [], mi.languages)
opf.smart_update(mi, apply_null=apply_null)
if getattr(mi, 'uuid', None):
opf.application_id = mi.uuid
if apply_null or force_identifiers:
opf.set_identifiers(mi.get_identifiers())
else:
orig = opf.get_identifiers()
orig.update(mi.get_identifiers())
opf.set_identifiers({k:v for k, v in orig.iteritems() if k and v})
if update_timestamp and mi.timestamp is not None:
opf.timestamp = mi.timestamp
def set_metadata(stream, mi, apply_null=False, update_timestamp=False, force_identifiers=False):
stream.seek(0)
reader = get_zip_reader(stream, root=os.getcwdu())
raster_cover = reader.opf.raster_cover
mi = MetaInformation(mi)
new_cdata = None
replacements = {}
try:
new_cdata = mi.cover_data[1]
if not new_cdata:
raise Exception('no cover')
except:
try:
new_cdata = open(mi.cover, 'rb').read()
except:
pass
new_cover = cpath = None
if new_cdata and raster_cover:
try:
cpath = posixpath.join(posixpath.dirname(reader.opf_path),
raster_cover)
cover_replacable = not reader.encryption_meta.is_encrypted(cpath) and \
os.path.splitext(cpath)[1].lower() in ('.png', '.jpg', '.jpeg')
if cover_replacable:
new_cover = _write_new_cover(new_cdata, cpath)
replacements[cpath] = open(new_cover.name, 'rb')
except Exception:
import traceback
traceback.print_exc()
update_metadata(reader.opf, mi, apply_null=apply_null,
update_timestamp=update_timestamp, force_identifiers=force_identifiers)
newopf = StringIO(reader.opf.render())
if isinstance(reader.archive, LocalZipFile):
reader.archive.safe_replace(reader.container[OPF.MIMETYPE], newopf,
extra_replacements=replacements)
else:
safe_replace(stream, reader.container[OPF.MIMETYPE], newopf,
extra_replacements=replacements)
try:
if cpath is not None:
replacements[cpath].close()
os.remove(replacements[cpath].name)
except:
pass
| timpalpant/calibre | src/calibre/ebooks/metadata/epub.py | Python | gpl-3.0 | 12,365 |
import datetime
import time
import math
import cv2
import imutils
import numpy
from controllers.logcontroller import LogController
from models.frame import Frame
import settings
# the run interval before logging, in seconds
TIME_INTERVAL = 5
MIN_AREA = 500
class VideoController:
"""
A class for managing a traffic camera feed.
    Initializing creates a log file for the feed.
    Provides runInfinite, which cycles through the frames of a
    stationary traffic camera feed and, at the end of every
    TIME_INTERVAL, logs the average number of cars detected
    during that interval.
"""
def __init__(self, video_path, world):
self.world = world
self.capture = cv2.VideoCapture(video_path)
self.lc = LogController(world)
self.fgbs = cv2.BackgroundSubtractorMOG2(history=1000,
varThreshold = 500,
bShadowDetection = False)
self.detector = self._buildBlobDetector()
def runInfinite(self,tkroot=None):
"""
Runs the video in sets of intervals computing and logging averages
after TIME_INTERVAL seconds.
"""
while(True):
try:
averages = self._runInterval()
timestamp = datetime.datetime.now().strftime("%Y/%m/%d %H:%M:%S")
if not settings.DEBUG:
self.lc.writeToLog(timestamp,averages)
#===============================================================
# if tkroot is not None:
# tkroot.addLog(packet)
# #retrieve pause signal from button press in tk
# # will only be caught after Time interval elapses
# play = tkroot.runUpdate()
# if not(play):
# break
#===============================================================
except:
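                # any failure (e.g. _getFrame raising once the capture
                # runs out of frames) ends the loop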
break
def _runInterval(self):
"""
        Runs a single TIME_INTERVAL-long interval and returns a
        dictionary, keyed like the --world command args, giving the
        average count of traffic detected moving in each world
        direction.
"""
frames_run = 0
#reinitialize the traffic counts
averages = {}
for k in self.world.keys():
averages[k] = 0
previous_keypoints = None
timeout = time.time() + TIME_INTERVAL
while time.time() < timeout:
iterDict = self._runIteration(previous_keypoints)
previous_keypoints = iterDict['keypoints']
averages_dict = iterDict['averages']
frames_run += 1
for k,v in averages_dict.iteritems():
averages[k] += v
#compute average over interval
for k in self.world.keys():
averages[k] = math.ceil(averages[k] / float(frames_run))
return averages
def _runIteration(self, previous_keypoints, return_frame=False):
"""
        Processes the next frame of the video and counts the
        vehicles detected in it. It handles a single image per call
        and therefore must be invoked inside a loop such as
        _runInterval.
        The return_frame flag is documented to return the frame with
        the detected vehicles and a summary count drawn, but it is
        not acted on in this implementation.
"""
frame = self._getFrame()
keypoints, averages = frame.analyzeFrame(previous_keypoints, self.world)
return {'keypoints':keypoints, 'averages':averages}
def _getFrame(self):
flag,img = self.capture.read()
if not flag:
raise Exception("Could not read video")
frame = Frame(img, self.fgbs, self.detector)
return frame
def _buildBlobDetector(self):
"""
creates a blob detector that can be used to identify the relevant
blobs of a background subtracted image. Algorithm can be improved
by supplying a truly empty frame to better remove noise.
"""
params = cv2.SimpleBlobDetector_Params()
params.filterByColor = True
params.blobColor = 255
params.filterByConvexity = True
params.minConvexity = 0.87
return cv2.SimpleBlobDetector(params)
def stopVideo(self):
self.capture.release()
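# Illustrative usage sketch (the structure of `world` is an assumption here;
# it must match what models.frame.Frame.analyzeFrame expects):
#
#   world = {'north': ..., 'south': ...}
#   vc = VideoController('traffic_feed.mp4', world)
#   vc.runInfinite()  # logs interval averages until the feed ends
#   vc.stopVideo()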
| cmput402w2016/CMPUT402W16T2 | src/controllers/videocontroller.py | Python | apache-2.0 | 4,839 |
#
# UAVCAN DSDL file parser
#
# Copyright (C) 2014-2015 Pavel Kirienko <pavel.kirienko@gmail.com>
#
from __future__ import division, absolute_import, print_function, unicode_literals
import os, re, logging
from io import StringIO
from .signature import Signature, compute_signature
from .common import DsdlException, pretty_filename, bytes_from_crc64
from .type_limits import get_unsigned_integer_range, get_signed_integer_range, get_float_range
# Python 2.7 compatibility
try:
str = unicode # @ReservedAssignment
except NameError:
pass
try:
long(1)
except NameError:
long = int # @ReservedAssignment
MAX_FULL_TYPE_NAME_LEN = 80
SERVICE_DATA_TYPE_ID_MAX = 255
MESSAGE_DATA_TYPE_ID_MAX = 65535
class Type:
'''
Common type description. The specialized type description classes inherit from this one.
Fields:
full_name Full type name string, e.g. "uavcan.protocol.NodeStatus"
category Any CATEGORY_*
'''
CATEGORY_PRIMITIVE = 0
CATEGORY_ARRAY = 1
CATEGORY_COMPOUND = 2
CATEGORY_VOID = 3
def __init__(self, full_name, category):
self.full_name = str(full_name)
self.category = category
def __str__(self):
return self.get_normalized_definition()
def get_data_type_signature(self):
return None
__repr__ = __str__
class PrimitiveType(Type):
'''
Primitive type description, e.g. bool or float16.
Fields:
kind Any KIND_*
bitlen Bit length, 1 to 64
cast_mode Any CAST_MODE_*
value_range Tuple containing min and max values: (min, max)
'''
KIND_BOOLEAN = 0
KIND_UNSIGNED_INT = 1
KIND_SIGNED_INT = 2
KIND_FLOAT = 3
CAST_MODE_SATURATED = 0
CAST_MODE_TRUNCATED = 1
def __init__(self, kind, bitlen, cast_mode):
self.kind = kind
self.bitlen = bitlen
self.cast_mode = cast_mode
Type.__init__(self, self.get_normalized_definition(), Type.CATEGORY_PRIMITIVE)
self.value_range = {
PrimitiveType.KIND_BOOLEAN: get_unsigned_integer_range,
PrimitiveType.KIND_UNSIGNED_INT: get_unsigned_integer_range,
PrimitiveType.KIND_SIGNED_INT: get_signed_integer_range,
PrimitiveType.KIND_FLOAT: get_float_range
}[self.kind](bitlen)
def get_normalized_definition(self):
'''Please refer to the specification for details about normalized definitions.'''
cast_mode = 'saturated' if self.cast_mode == PrimitiveType.CAST_MODE_SATURATED else 'truncated'
primary_type = {
PrimitiveType.KIND_BOOLEAN: 'bool',
PrimitiveType.KIND_UNSIGNED_INT: 'uint' + str(self.bitlen),
PrimitiveType.KIND_SIGNED_INT: 'int' + str(self.bitlen),
PrimitiveType.KIND_FLOAT: 'float' + str(self.bitlen)
}[self.kind]
return cast_mode + ' ' + primary_type
def validate_value_range(self, value):
'''Checks value range, throws DsdlException if the value cannot be represented by this type.'''
low, high = self.value_range
if not low <= value <= high:
error('Value [%s] is out of range %s', value, self.value_range)
def get_max_bitlen(self):
'''Returns type bit length.'''
return self.bitlen
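# Worked example (derived from the definitions above):
#   PrimitiveType(PrimitiveType.KIND_FLOAT, 16, PrimitiveType.CAST_MODE_SATURATED)
# normalizes to 'saturated float16' and reports get_max_bitlen() == 16.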
class ArrayType(Type):
'''
Array type description, e.g. float32[8], uint12[<34].
Fields:
value_type Description of the array value type; the type of this field inherits Type, e.g. PrimitiveType
mode Any MODE_*
max_size Maximum number of elements in the array
'''
MODE_STATIC = 0
MODE_DYNAMIC = 1
def __init__(self, value_type, mode, max_size):
self.value_type = value_type
self.mode = mode
self.max_size = max_size
Type.__init__(self, self.get_normalized_definition(), Type.CATEGORY_ARRAY)
def get_normalized_definition(self):
'''Please refer to the specification for details about normalized definitions.'''
typedef = self.value_type.get_normalized_definition()
return ('%s[<=%d]' if self.mode == ArrayType.MODE_DYNAMIC else '%s[%d]') % (typedef, self.max_size)
def get_max_bitlen(self):
'''Returns total maximum bit length of the array, including length field if applicable.'''
payload_max_bitlen = self.max_size * self.value_type.get_max_bitlen()
return {
self.MODE_DYNAMIC: payload_max_bitlen + self.max_size.bit_length(),
self.MODE_STATIC: payload_max_bitlen
}[self.mode]
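    # Worked example (from the formula above): for 'uint8[<=5]' the payload is
    # 5 * 8 = 40 bits and the dynamic length field needs (5).bit_length() = 3
    # bits, so get_max_bitlen() -> 43.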
def get_data_type_signature(self):
return self.value_type.get_data_type_signature()
class CompoundType(Type):
'''
Compound type description, e.g. uavcan.protocol.NodeStatus.
Fields:
source_file Path to the DSDL definition file for this type
default_dtid Default Data Type ID, if specified, None otherwise
kind Any KIND_*
source_text Raw DSDL definition text (as is, with comments and the original formatting)
Fields if kind == KIND_SERVICE:
request_fields Request struct field list, the type of each element is Field
response_fields Response struct field list
request_constants Request struct constant list, the type of each element is Constant
response_constants Response struct constant list
request_union Boolean indicating whether the request struct is a union
response_union Boolean indicating whether the response struct is a union
Fields if kind == KIND_MESSAGE:
fields Field list, the type of each element is Field
constants Constant list, the type of each element is Constant
union Boolean indicating whether the message struct is a union
Extra methods if kind == KIND_SERVICE:
get_max_bitlen_request() Returns maximum total bit length for the serialized request struct
get_max_bitlen_response() Same for the response struct
Extra methods if kind == KIND_MESSAGE:
get_max_bitlen() Returns maximum total bit length for the serialized struct
'''
KIND_SERVICE = 0
KIND_MESSAGE = 1
def __init__(self, full_name, kind, source_file, default_dtid, source_text):
Type.__init__(self, full_name, Type.CATEGORY_COMPOUND)
self.source_file = source_file
self.default_dtid = default_dtid
self.kind = kind
self.source_text = source_text
def compute_max_bitlen(flds, union):
if len(flds) == 0:
return 0
lens = [x.type.get_max_bitlen() for x in flds]
if union:
return max(lens) + max(len(flds) - 1, 1).bit_length()
else:
return sum(lens)
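        # Worked example (from the branch above): a union of three fields of
        # 8, 16 and 24 bits needs max(3 - 1, 1).bit_length() = 2 tag bits, so
        # its maximum is 24 + 2 = 26 bits; a plain struct would be 48.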
if kind == CompoundType.KIND_SERVICE:
self.request_fields = []
self.response_fields = []
self.request_constants = []
self.response_constants = []
self.get_max_bitlen_request = lambda: compute_max_bitlen(self.request_fields, self.request_union)
self.get_max_bitlen_response = lambda: compute_max_bitlen(self.response_fields, self.response_union)
self.request_union = False
self.response_union = False
elif kind == CompoundType.KIND_MESSAGE:
self.fields = []
self.constants = []
self.get_max_bitlen = lambda: compute_max_bitlen(self.fields, self.union)
self.union = False
else:
error('Compound type of unknown kind [%s]', kind)
def get_dsdl_signature_source_definition(self):
'''
Returns normalized DSDL definition text.
Please refer to the specification for details about normalized DSDL definitions.
'''
txt = StringIO()
txt.write(self.full_name + '\n')
adjoin = lambda attrs: txt.write('\n'.join(x.get_normalized_definition() for x in attrs) + '\n')
if self.kind == CompoundType.KIND_SERVICE:
if self.request_union:
txt.write('\n@union\n')
adjoin(self.request_fields)
txt.write('\n---\n')
if self.response_union:
txt.write('\n@union\n')
adjoin(self.response_fields)
elif self.kind == CompoundType.KIND_MESSAGE:
if self.union:
txt.write('\n@union\n')
adjoin(self.fields)
else:
error('Compound type of unknown kind [%s]', self.kind)
return txt.getvalue().strip().replace('\n\n\n', '\n').replace('\n\n', '\n')
def get_dsdl_signature(self):
'''
Computes DSDL signature of this type.
Please refer to the specification for details about signatures.
'''
return compute_signature(self.get_dsdl_signature_source_definition())
def get_normalized_definition(self):
'''Returns full type name string, e.g. "uavcan.protocol.NodeStatus"'''
return self.full_name
def get_data_type_signature(self):
'''
Computes data type signature of this type. The data type signature is
guaranteed to match only if all nested data structures are compatible.
Please refer to the specification for details about signatures.
'''
sig = Signature(self.get_dsdl_signature())
fields = self.request_fields + self.response_fields \
if self.kind == CompoundType.KIND_SERVICE else self.fields
for field in fields:
field_sig = field.type.get_data_type_signature()
if field_sig is not None:
sig_value = sig.get_value()
sig.add(bytes_from_crc64(field_sig))
sig.add(bytes_from_crc64(sig_value))
return sig.get_value()
class VoidType(Type):
'''
Void type description, e.g. void2.
Fields:
bitlen Bit length, 1 to 64
'''
def __init__(self, bitlen):
self.bitlen = bitlen
Type.__init__(self, self.get_normalized_definition(), Type.CATEGORY_VOID)
def get_normalized_definition(self):
'''Please refer to the specification for details about normalized definitions.'''
return 'void' + str(self.bitlen)
def get_max_bitlen(self):
'''Returns type bit length.'''
return self.bitlen
class Attribute:
'''
Base class of an attribute description.
Fields:
type Attribute type description, the type of this field inherits the class Type, e.g. PrimitiveType
name Attribute name string
'''
def __init__(self, type, name): # @ReservedAssignment
self.type = type
self.name = name
def __str__(self):
return self.get_normalized_definition()
__repr__ = __str__
class Field(Attribute):
'''
Field description.
Does not add new fields to Attribute.
If type is void, the name will be None.
'''
def get_normalized_definition(self):
if self.type.category == self.type.CATEGORY_VOID:
return self.type.get_normalized_definition()
else:
return '%s %s' % (self.type.get_normalized_definition(), self.name)
class Constant(Attribute):
'''
Constant description.
Fields:
init_expression Constant initialization expression string, e.g. "2+2" or "'\x66'"
value Computed result of the initialization expression in the final type (e.g. int, float)
string_value Computed result of the initialization expression as string
'''
def __init__(self, type, name, init_expression, value): # @ReservedAssignment
Attribute.__init__(self, type, name)
self.init_expression = init_expression
self.value = value
self.string_value = repr(value)
if isinstance(value, long):
self.string_value = self.string_value.replace('L', '')
def get_normalized_definition(self):
return '%s %s = %s' % (self.type.get_normalized_definition(), self.name, self.init_expression)
class Parser:
'''
DSDL parser logic. Do not use this class directly; use the helper function instead.
'''
LOGGER_NAME = 'dsdl_parser'
def __init__(self, search_dirs):
self.search_dirs = validate_search_directories(search_dirs)
self.log = logging.getLogger(Parser.LOGGER_NAME)
def _namespace_from_filename(self, filename):
search_dirs = sorted(map(os.path.abspath, self.search_dirs)) # Nested last
filename = os.path.abspath(filename)
for dirname in search_dirs:
root_ns = dirname.split(os.path.sep)[-1]
if filename.startswith(dirname):
dir_len = len(dirname)
basename_len = len(os.path.basename(filename))
ns = filename[dir_len:-basename_len]
ns = (root_ns + '.' + ns.replace(os.path.sep, '.').strip('.')).strip('.')
validate_namespace_name(ns)
return ns
error('File [%s] was not found in search directories', filename)
def _full_typename_and_dtid_from_filename(self, filename):
basename = os.path.basename(filename)
items = basename.split('.')
if (len(items) != 2 and len(items) != 3) or items[-1] != 'uavcan':
error('Invalid file name [%s]; expected pattern: [<default-dtid>.]<short-type-name>.uavcan', basename)
if len(items) == 2:
default_dtid, name = None, items[0]
else:
default_dtid, name = items[0], items[1]
try:
default_dtid = int(default_dtid)
except ValueError:
error('Invalid default data type ID [%s]', default_dtid)
full_name = self._namespace_from_filename(filename) + '.' + name
validate_compound_type_full_name(full_name)
return full_name, default_dtid
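    # e.g. (hypothetical files) '100.Foo.uavcan' under root namespace 'demo'
    # yields ('demo.Foo', 100), while 'Foo.uavcan' yields ('demo.Foo', None).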
def _locate_compound_type_definition(self, referencing_filename, typename):
def locate_namespace_directory(namespace):
namespace_components = namespace.split('.')
root_namespace, sub_namespace_components = namespace_components[0], namespace_components[1:]
for directory in self.search_dirs:
if directory.split(os.path.sep)[-1] == root_namespace:
return os.path.join(directory, *sub_namespace_components)
error('Unknown namespace [%s]', namespace)
if '.' not in typename:
current_namespace = self._namespace_from_filename(referencing_filename)
full_typename = current_namespace + '.' + typename
else:
full_typename = typename
namespace = '.'.join(full_typename.split('.')[:-1])
directory = locate_namespace_directory(namespace)
self.log.debug('Searching for [%s] in [%s]', full_typename, directory)
for fn in os.listdir(directory):
fn = os.path.join(directory, fn)
if os.path.isfile(fn):
try:
fn_full_typename, _dtid = self._full_typename_and_dtid_from_filename(fn)
if full_typename == fn_full_typename:
return fn
except Exception as ex:
self.log.debug('Unknown file [%s], skipping... [%s]', pretty_filename(fn), ex)
error('Type definition not found [%s]', typename)
def _parse_void_type(self, filename, bitlen):
enforce(1 <= bitlen <= 64, 'Invalid void bit length [%d]', bitlen)
return VoidType(bitlen)
def _parse_array_type(self, filename, value_typedef, size_spec, cast_mode):
self.log.debug('Parsing the array value type [%s]...', value_typedef)
value_type = self._parse_type(filename, value_typedef, cast_mode)
enforce(value_type.category != value_type.CATEGORY_ARRAY,
'Multidimensional arrays are not allowed (protip: use nested types)')
try:
if size_spec.startswith('<='):
max_size = int(size_spec[2:], 0)
mode = ArrayType.MODE_DYNAMIC
elif size_spec.startswith('<'):
max_size = int(size_spec[1:], 0) - 1
mode = ArrayType.MODE_DYNAMIC
else:
max_size = int(size_spec, 0)
mode = ArrayType.MODE_STATIC
except ValueError:
error('Invalid array size specifier [%s] (valid patterns: [<=X], [<X], [X])', size_spec)
enforce(max_size > 0, 'Array size must be positive, not %d', max_size)
return ArrayType(value_type, mode, max_size)
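    # e.g. 'uint8[<=16]' -> dynamic, max 16 elements; 'uint8[<16]' -> dynamic,
    # max 15; 'uint8[16]' -> static, exactly 16 elements.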
def _parse_primitive_type(self, filename, base_name, bitlen, cast_mode):
if cast_mode is None or cast_mode == 'saturated':
cast_mode = PrimitiveType.CAST_MODE_SATURATED
elif cast_mode == 'truncated':
cast_mode = PrimitiveType.CAST_MODE_TRUNCATED
else:
error('Invalid cast mode [%s]', cast_mode)
if base_name == 'bool':
return PrimitiveType(PrimitiveType.KIND_BOOLEAN, 1, cast_mode)
try:
kind = {
'uint' : PrimitiveType.KIND_UNSIGNED_INT,
'int' : PrimitiveType.KIND_SIGNED_INT,
'float': PrimitiveType.KIND_FLOAT,
}[base_name]
except KeyError:
error('Unknown primitive type (note: compound types should be in CamelCase)')
if kind == PrimitiveType.KIND_FLOAT:
enforce(bitlen in (16, 32, 64), 'Invalid bit length for float type [%d]', bitlen)
else:
enforce(2 <= bitlen <= 64, 'Invalid bit length [%d] (note: use bool instead of uint1)', bitlen)
return PrimitiveType(kind, bitlen, cast_mode)
def _parse_compound_type(self, filename, typedef):
definition_filename = self._locate_compound_type_definition(filename, typedef)
self.log.debug('Nested type [%s] is defined in [%s], parsing...', typedef, pretty_filename(definition_filename))
t = self.parse(definition_filename)
if t.kind == t.KIND_SERVICE:
error('A service type can not be nested into another compound type')
return t
def _parse_type(self, filename, typedef, cast_mode):
typedef = typedef.strip()
void_match = re.match(r'void(\d{1,2})$', typedef)
array_match = re.match(r'(.+?)\[([^\]]*)\]$', typedef)
primitive_match = re.match(r'([a-z]+)(\d{1,2})$|(bool)$', typedef)
if void_match:
size_spec = void_match.group(1).strip()
return self._parse_void_type(filename, int(size_spec))
elif array_match:
assert not primitive_match
value_typedef = array_match.group(1).strip()
size_spec = array_match.group(2).strip()
return self._parse_array_type(filename, value_typedef, size_spec, cast_mode)
elif primitive_match:
if primitive_match.group(0) == 'bool':
return self._parse_primitive_type(filename, 'bool', 1, cast_mode)
else:
base_name = primitive_match.group(1)
bitlen = int(primitive_match.group(2))
return self._parse_primitive_type(filename, base_name, bitlen, cast_mode)
else:
enforce(cast_mode is None, 'Cast mode specifier is not applicable for compound types [%s]', cast_mode)
return self._parse_compound_type(filename, typedef)
def _make_constant(self, attrtype, name, init_expression):
enforce(attrtype.category == attrtype.CATEGORY_PRIMITIVE, 'Invalid type for constant [%d]', attrtype.category)
init_expression = ''.join(init_expression.split()) # Remove spaces
value = evaluate_expression(init_expression)
if isinstance(value, str) and len(value) == 1: # ASCII character
value = ord(value)
elif isinstance(value, (float, int, bool, long)): # Numeric literal
value = {
attrtype.KIND_UNSIGNED_INT : long,
attrtype.KIND_SIGNED_INT : long,
attrtype.KIND_BOOLEAN : int, # Not bool because we need to check range
attrtype.KIND_FLOAT : float
}[attrtype.kind](value)
else:
error('Invalid type of constant initialization expression [%s]', type(value).__name__)
self.log.debug('Constant initialization expression evaluated as: [%s] --> %s', init_expression, repr(value))
attrtype.validate_value_range(value)
return Constant(attrtype, name, init_expression, value)
def _parse_line(self, filename, tokens):
cast_mode = None
if tokens[0] == 'saturated' or tokens[0] == 'truncated':
cast_mode, tokens = tokens[0], tokens[1:]
if len(tokens) < 2 and not tokens[0].startswith('void'):
error('Invalid attribute definition')
if len(tokens) == 1:
typename, attrname, tokens = tokens[0], None, []
else:
typename, attrname, tokens = tokens[0], tokens[1], tokens[2:]
validate_attribute_name(attrname)
attrtype = self._parse_type(filename, typename, cast_mode)
if len(tokens) > 0:
if len(tokens) < 2 or tokens[0] != '=':
error('Constant assignment expected')
expression = ' '.join(tokens[1:])
return self._make_constant(attrtype, attrname, expression)
else:
return Field(attrtype, attrname)
def _tokenize(self, text):
for idx, line in enumerate(text.splitlines()):
line = re.sub('#.*', '', line).strip() # Remove comments and leading/trailing whitespaces
if line:
tokens = [tk for tk in line.split() if tk]
yield idx + 1, tokens
def parse_source(self, filename, source_text):
try:
full_typename, default_dtid = self._full_typename_and_dtid_from_filename(filename)
numbered_lines = list(self._tokenize(source_text))
all_attributes_names = set()
fields, constants, resp_fields, resp_constants = [], [], [], []
union, resp_union = False, False
response_part = False
for num, tokens in numbered_lines:
try:
if tokens == ['---']:
enforce(not response_part, 'Duplicate response mark')
response_part = True
all_attributes_names = set()
continue
if tokens == ['@union']:
if response_part:
enforce(not resp_union, 'Response data structure has already been declared as union')
resp_union = True
else:
enforce(not union, 'Data structure has already been declared as union')
union = True
continue
attr = self._parse_line(filename, tokens)
if attr.name and attr.name in all_attributes_names:
error('Duplicated attribute name [%s]', attr.name)
all_attributes_names.add(attr.name)
if isinstance(attr, Constant):
(resp_constants if response_part else constants).append(attr)
elif isinstance(attr, Field):
(resp_fields if response_part else fields).append(attr)
else:
error('Unknown attribute type - internal error')
except DsdlException as ex:
if not ex.line:
ex.line = num
raise ex
except Exception as ex:
self.log.error('Internal error', exc_info=True)
raise DsdlException('Internal error: %s' % str(ex), line=num)
if response_part:
t = CompoundType(full_typename, CompoundType.KIND_SERVICE, filename, default_dtid, source_text)
t.request_fields = fields
t.request_constants = constants
t.response_fields = resp_fields
t.response_constants = resp_constants
t.request_union = union
t.response_union = resp_union
max_bitlen = t.get_max_bitlen_request(), t.get_max_bitlen_response()
max_bytelen = tuple(map(bitlen_to_bytelen, max_bitlen))
else:
t = CompoundType(full_typename, CompoundType.KIND_MESSAGE, filename, default_dtid, source_text)
t.fields = fields
t.constants = constants
t.union = union
max_bitlen = t.get_max_bitlen()
max_bytelen = bitlen_to_bytelen(max_bitlen)
validate_union(t)
validate_data_type_id(t)
self.log.info('Type [%s], default DTID: %s, signature: %08x, maxbits: %s, maxbytes: %s, DSSD:',
full_typename, default_dtid, t.get_dsdl_signature(), max_bitlen, max_bytelen)
for ln in t.get_dsdl_signature_source_definition().splitlines():
self.log.info(' %s', ln)
return t
except DsdlException as ex:
if not ex.file:
ex.file = filename
raise ex
def parse(self, filename):
try:
filename = os.path.abspath(filename)
with open(filename) as f:
source_text = f.read()
return self.parse_source(filename, source_text)
except IOError as ex:
raise DsdlException('IO error: %s' % str(ex), file=filename)
except Exception as ex:
self.log.error('Internal error', exc_info=True)
raise DsdlException('Internal error: %s' % str(ex), file=filename)
def error(fmt, *args):
raise DsdlException(fmt % args)
def enforce(cond, fmt, *args):
if not cond:
error(fmt, *args)
def bitlen_to_bytelen(x):
return int((x + 7) / 8)
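# e.g. bitlen_to_bytelen(13) -> 2 (13 bits round up to two whole bytes)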
def evaluate_expression(expression):
try:
env = {
'locals': None,
'globals': None,
'__builtins__': None,
'true': 1,
'false': 0
}
return eval(expression, env)
except Exception as ex:
error('Cannot evaluate expression: %s', str(ex))
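# Worked example (derived from the sandboxed env above):
# evaluate_expression('2**10 - 1') returns 1023, while
# evaluate_expression('__import__("os")') fails with a NameError because
# __builtins__ is disabled, and the failure is reported via error().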
def validate_search_directories(dirnames):
dirnames = set(dirnames)
dirnames = list(map(os.path.abspath, dirnames))
for d1 in dirnames:
for d2 in dirnames:
if d1 == d2:
continue
enforce(not d1.startswith(d2), 'Nested search directories are not allowed [%s] [%s]', d1, d2)
enforce(d1.split(os.path.sep)[-1] != d2.split(os.path.sep)[-1],
'Namespace roots must be unique [%s] [%s]', d1, d2)
return dirnames
def validate_namespace_name(name):
for component in name.split('.'):
enforce(re.match(r'[a-z][a-z0-9_]*$', component), 'Invalid namespace name [%s]', name)
enforce(len(name) <= MAX_FULL_TYPE_NAME_LEN, 'Namespace name is too long [%s]', name)
def validate_compound_type_full_name(name):
enforce('.' in name, 'Full type name must explicitly specify its namespace [%s]', name)
short_name = name.split('.')[-1]
namespace = '.'.join(name.split('.')[:-1])
validate_namespace_name(namespace)
enforce(re.match(r'[A-Z][A-Za-z0-9_]*$', short_name), 'Invalid type name [%s]', name)
enforce(len(name) <= MAX_FULL_TYPE_NAME_LEN, 'Type name is too long [%s]', name)
def validate_attribute_name(name):
enforce(re.match(r'[a-zA-Z][a-zA-Z0-9_]*$', name), 'Invalid attribute name [%s]', name)
def validate_data_type_id(t):
if t.default_dtid is None:
return
if t.kind == t.KIND_MESSAGE:
enforce(0 <= t.default_dtid <= MESSAGE_DATA_TYPE_ID_MAX,
'Invalid data type ID for message [%s]', t.default_dtid)
elif t.kind == t.KIND_SERVICE:
enforce(0 <= t.default_dtid <= SERVICE_DATA_TYPE_ID_MAX,
'Invalid data type ID for service [%s]', t.default_dtid)
else:
error('Invalid kind: %s', t.kind)
def validate_union(t):
def check_fields(fields):
enforce(len(fields) > 1, 'Union contains less than 2 fields')
enforce(not any(_.type.category == _.type.CATEGORY_VOID for _ in fields), 'Union must not contain void fields')
if t.kind == t.KIND_MESSAGE:
if t.union:
check_fields(t.fields)
elif t.kind == t.KIND_SERVICE:
if t.request_union:
check_fields(t.request_fields)
if t.response_union:
check_fields(t.response_fields)
else:
error('Invalid kind: %s', t.kind)
def parse_namespaces(source_dirs, search_dirs=None):
'''
Use only this function to parse DSDL definitions.
This function takes a list of root namespace directories (containing DSDL definition files to parse) and an
optional list of search directories (containing DSDL definition files that can be referenced from the types
that are going to be parsed).
Returns the list of parsed type definitions, where type of each element is CompoundType.
Args:
source_dirs List of root namespace directories to parse.
search_dirs List of root namespace directories with referenced types (optional). This list is
automaitcally extended with source_dirs.
Example:
>>> import uavcan
>>> a = uavcan.dsdl.parse_namespaces(['../dsdl/uavcan'])
>>> len(a)
77
>>> a[0]
uavcan.Timestamp
>>> a[0].fields
[truncated uint48 husec]
>>> a[0].constants
[saturated uint48 UNKNOWN = 0, saturated uint48 USEC_PER_LSB = 100]
'''
def walk():
import fnmatch
from functools import partial
def on_walk_error(directory, ex):
raise DsdlException('OS error in [%s]: %s' % (directory, str(ex)))
for source_dir in source_dirs:
walker = os.walk(source_dir, onerror=partial(on_walk_error, source_dir), followlinks=True)
for root, _dirnames, filenames in walker:
for filename in fnmatch.filter(filenames, '*.uavcan'):
filename = os.path.join(root, filename)
yield filename
all_default_dtid = {} # (kind, dtid) : filename
def ensure_unique_dtid(t, filename):
if t.default_dtid is None:
return
key = t.kind, t.default_dtid
if key in all_default_dtid:
first = pretty_filename(all_default_dtid[key])
second = pretty_filename(filename)
error('Default data type ID collision: [%s] [%s]', first, second)
all_default_dtid[key] = filename
parser = Parser(source_dirs + (search_dirs or []))
output_types = []
for filename in walk():
t = parser.parse(filename)
ensure_unique_dtid(t, filename)
output_types.append(t)
return output_types
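# Minimal usage sketch; the 'dsdl/uavcan' path is hypothetical and should point at a
# real root namespace directory containing *.uavcan files:
#
#   types = parse_namespaces(['dsdl/uavcan'])
#   for t in types:
#       print(t, t.default_dtid)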
| zhumingliang1209/Ardupilot | ardupilot/modules/uavcan/libuavcan/dsdl_compiler/pyuavcan/uavcan/dsdl/parser.py | Python | gpl-3.0 | 31,158 |
#!/usr/bin/env python
# pylint: disable=missing-docstring
# flake8: noqa: T001
# ___ ___ _ _ ___ ___ _ _____ ___ ___
# / __| __| \| | __| _ \ /_\_ _| __| \
# | (_ | _|| .` | _|| / / _ \| | | _|| |) |
# \___|___|_|\_|___|_|_\/_/_\_\_|_|___|___/_ _____
# | \ / _ \ | \| |/ _ \_ _| | __| \_ _|_ _|
# | |) | (_) | | .` | (_) || | | _|| |) | | | |
# |___/ \___/ |_|\_|\___/ |_| |___|___/___| |_|
#
# Copyright 2016 Red Hat, Inc. and/or its affiliates
# and other contributors as indicated by the @author tags.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# -*- -*- -*- Begin included fragment: lib/import.py -*- -*- -*-
'''
OpenShiftCLI class that wraps the oc commands in a subprocess
'''
# pylint: disable=too-many-lines
from __future__ import print_function
import atexit
import copy
import fcntl
import json
import time
import os
import re
import shutil
import subprocess
import tempfile
# pylint: disable=import-error
try:
import ruamel.yaml as yaml
except ImportError:
import yaml
from ansible.module_utils.basic import AnsibleModule
# -*- -*- -*- End included fragment: lib/import.py -*- -*- -*-
# -*- -*- -*- Begin included fragment: doc/serviceaccount -*- -*- -*-
DOCUMENTATION = '''
---
module: oc_serviceaccount
short_description: Module to manage openshift service accounts
description:
- Manage openshift service accounts programmatically.
options:
state:
description:
    - If present, the service account will be created if it doesn't exist or updated if different.
    - If absent, the service account will be removed if present.
    - If list, information about the service account will be gathered and returned as part of the Ansible call results.
required: false
default: present
choices: ["present", "absent", "list"]
aliases: []
kubeconfig:
description:
- The path for the kubeconfig file to use for authentication
required: false
default: /etc/origin/master/admin.kubeconfig
aliases: []
debug:
description:
- Turn on debug output.
required: false
default: false
aliases: []
name:
description:
- Name of the service account.
required: true
default: None
aliases: []
namespace:
description:
- Namespace of the service account.
required: true
default: default
aliases: []
secrets:
description:
- A list of secrets that are associated with the service account.
required: false
default: None
aliases: []
image_pull_secrets:
description:
- A list of the image pull secrets that are associated with the service account.
required: false
default: None
aliases: []
author:
- "Kenny Woodson <kwoodson@redhat.com>"
extends_documentation_fragment: []
'''
EXAMPLES = '''
- name: create registry serviceaccount
oc_serviceaccount:
name: registry
namespace: default
secrets:
- docker-registry-config
- registry-secret
register: sa_out
'''
# -*- -*- -*- End included fragment: doc/serviceaccount -*- -*- -*-
# -*- -*- -*- Begin included fragment: ../../lib_utils/src/class/yedit.py -*- -*- -*-
class YeditException(Exception): # pragma: no cover
''' Exception class for Yedit '''
pass
# pylint: disable=too-many-public-methods
class Yedit(object): # pragma: no cover
''' Class to modify yaml files '''
re_valid_key = r"(((\[-?\d+\])|([0-9a-zA-Z%s/_-]+)).?)+$"
re_key = r"(?:\[(-?\d+)\])|([0-9a-zA-Z{}/_-]+)"
com_sep = set(['.', '#', '|', ':'])
# pylint: disable=too-many-arguments
def __init__(self,
filename=None,
content=None,
content_type='yaml',
separator='.',
backup=False):
self.content = content
self._separator = separator
self.filename = filename
self.__yaml_dict = content
self.content_type = content_type
self.backup = backup
self.load(content_type=self.content_type)
if self.__yaml_dict is None:
self.__yaml_dict = {}
@property
def separator(self):
''' getter method for separator '''
return self._separator
@separator.setter
def separator(self, inc_sep):
''' setter method for separator '''
self._separator = inc_sep
@property
def yaml_dict(self):
''' getter method for yaml_dict '''
return self.__yaml_dict
@yaml_dict.setter
def yaml_dict(self, value):
''' setter method for yaml_dict '''
self.__yaml_dict = value
@staticmethod
def parse_key(key, sep='.'):
'''parse the key allowing the appropriate separator'''
common_separators = list(Yedit.com_sep - set([sep]))
return re.findall(Yedit.re_key.format(''.join(common_separators)), key)
@staticmethod
def valid_key(key, sep='.'):
'''validate the incoming key'''
common_separators = list(Yedit.com_sep - set([sep]))
if not re.match(Yedit.re_valid_key.format(''.join(common_separators)), key):
return False
return True
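    # Sketch of the key grammar handled by parse_key()/valid_key(), assuming the
    # default '.' separator; each match is an (array_index, dict_key) pair with the
    # unused slot left empty:
    #   Yedit.parse_key('a.b[0].c') -> [('', 'a'), ('', 'b'), ('0', ''), ('', 'c')]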
# pylint: disable=too-many-return-statements,too-many-branches
@staticmethod
def remove_entry(data, key, index=None, value=None, sep='.'):
''' remove data at location key '''
if key == '' and isinstance(data, dict):
if value is not None:
data.pop(value)
elif index is not None:
raise YeditException("remove_entry for a dictionary does not have an index {}".format(index))
else:
data.clear()
return True
elif key == '' and isinstance(data, list):
ind = None
if value is not None:
try:
ind = data.index(value)
except ValueError:
return False
elif index is not None:
ind = index
else:
del data[:]
if ind is not None:
data.pop(ind)
return True
if not (key and Yedit.valid_key(key, sep)) and \
isinstance(data, (list, dict)):
return None
key_indexes = Yedit.parse_key(key, sep)
for arr_ind, dict_key in key_indexes[:-1]:
if dict_key and isinstance(data, dict):
data = data.get(dict_key)
elif (arr_ind and isinstance(data, list) and
int(arr_ind) <= len(data) - 1):
data = data[int(arr_ind)]
else:
return None
# process last index for remove
# expected list entry
if key_indexes[-1][0]:
if isinstance(data, list) and int(key_indexes[-1][0]) <= len(data) - 1: # noqa: E501
del data[int(key_indexes[-1][0])]
return True
# expected dict entry
elif key_indexes[-1][1]:
if isinstance(data, dict):
del data[key_indexes[-1][1]]
return True
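    # Example of remove_entry() on a hypothetical nested dict:
    #   data = {'a': {'b': 'c'}}
    #   Yedit.remove_entry(data, 'a.b') -> True, leaving data == {'a': {}}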
@staticmethod
def add_entry(data, key, item=None, sep='.'):
''' Get an item from a dictionary with key notation a.b.c
d = {'a': {'b': 'c'}}}
key = a#b
return c
'''
if key == '':
pass
elif (not (key and Yedit.valid_key(key, sep)) and
isinstance(data, (list, dict))):
return None
key_indexes = Yedit.parse_key(key, sep)
for arr_ind, dict_key in key_indexes[:-1]:
if dict_key:
if isinstance(data, dict) and dict_key in data and data[dict_key]: # noqa: E501
data = data[dict_key]
continue
elif data and not isinstance(data, dict):
raise YeditException("Unexpected item type found while going through key " +
"path: {} (at key: {})".format(key, dict_key))
data[dict_key] = {}
data = data[dict_key]
elif (arr_ind and isinstance(data, list) and
int(arr_ind) <= len(data) - 1):
data = data[int(arr_ind)]
else:
raise YeditException("Unexpected item type found while going through key path: {}".format(key))
if key == '':
data = item
# process last index for add
# expected list entry
elif key_indexes[-1][0] and isinstance(data, list) and int(key_indexes[-1][0]) <= len(data) - 1: # noqa: E501
data[int(key_indexes[-1][0])] = item
# expected dict entry
elif key_indexes[-1][1] and isinstance(data, dict):
data[key_indexes[-1][1]] = item
# didn't add/update to an existing list, nor add/update key to a dict
# so we must have been provided some syntax like a.b.c[<int>] = "data" for a
# non-existent array
else:
raise YeditException("Error adding to object at path: {}".format(key))
return data
@staticmethod
def get_entry(data, key, sep='.'):
''' Get an item from a dictionary with key notation a.b.c
d = {'a': {'b': 'c'}}}
key = a.b
return c
'''
if key == '':
pass
elif (not (key and Yedit.valid_key(key, sep)) and
isinstance(data, (list, dict))):
return None
key_indexes = Yedit.parse_key(key, sep)
for arr_ind, dict_key in key_indexes:
if dict_key and isinstance(data, dict):
data = data.get(dict_key)
elif (arr_ind and isinstance(data, list) and
int(arr_ind) <= len(data) - 1):
data = data[int(arr_ind)]
else:
return None
return data
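    # Examples of get_entry() with the default '.' separator (data is hypothetical):
    #   Yedit.get_entry({'a': {'b': {'c': 'd'}}}, 'a.b.c') -> 'd'
    #   Yedit.get_entry({'a': [{'b': 1}]}, 'a[0].b')       -> 1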
@staticmethod
def _write(filename, contents):
''' Actually write the file contents to disk. This helps with mocking. '''
tmp_filename = filename + '.yedit'
with open(tmp_filename, 'w') as yfd:
fcntl.flock(yfd, fcntl.LOCK_EX | fcntl.LOCK_NB)
yfd.write(contents)
fcntl.flock(yfd, fcntl.LOCK_UN)
os.rename(tmp_filename, filename)
def write(self):
''' write to file '''
if not self.filename:
raise YeditException('Please specify a filename.')
if self.backup and self.file_exists():
shutil.copy(self.filename, '{}.{}'.format(self.filename, time.strftime("%Y%m%dT%H%M%S")))
# Try to set format attributes if supported
try:
self.yaml_dict.fa.set_block_style()
except AttributeError:
pass
# Try to use RoundTripDumper if supported.
if self.content_type == 'yaml':
try:
Yedit._write(self.filename, yaml.dump(self.yaml_dict, Dumper=yaml.RoundTripDumper))
except AttributeError:
Yedit._write(self.filename, yaml.safe_dump(self.yaml_dict, default_flow_style=False))
elif self.content_type == 'json':
Yedit._write(self.filename, json.dumps(self.yaml_dict, indent=4, sort_keys=True))
else:
raise YeditException('Unsupported content_type: {}.'.format(self.content_type) +
'Please specify a content_type of yaml or json.')
return (True, self.yaml_dict)
def read(self):
''' read from file '''
# check if it exists
if self.filename is None or not self.file_exists():
return None
contents = None
with open(self.filename) as yfd:
contents = yfd.read()
return contents
def file_exists(self):
''' return whether file exists '''
if os.path.exists(self.filename):
return True
return False
def load(self, content_type='yaml'):
''' return yaml file '''
contents = self.read()
if not contents and not self.content:
return None
if self.content:
if isinstance(self.content, dict):
self.yaml_dict = self.content
return self.yaml_dict
elif isinstance(self.content, str):
contents = self.content
# check if it is yaml
try:
if content_type == 'yaml' and contents:
# Try to set format attributes if supported
try:
self.yaml_dict.fa.set_block_style()
except AttributeError:
pass
# Try to use RoundTripLoader if supported.
try:
self.yaml_dict = yaml.load(contents, yaml.RoundTripLoader)
except AttributeError:
self.yaml_dict = yaml.safe_load(contents)
# Try to set format attributes if supported
try:
self.yaml_dict.fa.set_block_style()
except AttributeError:
pass
elif content_type == 'json' and contents:
self.yaml_dict = json.loads(contents)
except yaml.YAMLError as err:
# Error loading yaml or json
raise YeditException('Problem with loading yaml file. {}'.format(err))
return self.yaml_dict
def get(self, key):
''' get a specified key'''
try:
entry = Yedit.get_entry(self.yaml_dict, key, self.separator)
except KeyError:
entry = None
return entry
def pop(self, path, key_or_item):
''' remove a key, value pair from a dict or an item for a list'''
try:
entry = Yedit.get_entry(self.yaml_dict, path, self.separator)
except KeyError:
entry = None
if entry is None:
return (False, self.yaml_dict)
if isinstance(entry, dict):
# AUDIT:maybe-no-member makes sense due to fuzzy types
# pylint: disable=maybe-no-member
if key_or_item in entry:
entry.pop(key_or_item)
return (True, self.yaml_dict)
return (False, self.yaml_dict)
elif isinstance(entry, list):
# AUDIT:maybe-no-member makes sense due to fuzzy types
# pylint: disable=maybe-no-member
ind = None
try:
ind = entry.index(key_or_item)
except ValueError:
return (False, self.yaml_dict)
entry.pop(ind)
return (True, self.yaml_dict)
return (False, self.yaml_dict)
def delete(self, path, index=None, value=None):
''' remove path from a dict'''
try:
entry = Yedit.get_entry(self.yaml_dict, path, self.separator)
except KeyError:
entry = None
if entry is None:
return (False, self.yaml_dict)
result = Yedit.remove_entry(self.yaml_dict, path, index, value, self.separator)
if not result:
return (False, self.yaml_dict)
return (True, self.yaml_dict)
def exists(self, path, value):
''' check if value exists at path'''
try:
entry = Yedit.get_entry(self.yaml_dict, path, self.separator)
except KeyError:
entry = None
if isinstance(entry, list):
if value in entry:
return True
return False
elif isinstance(entry, dict):
if isinstance(value, dict):
rval = False
for key, val in value.items():
if entry[key] != val:
rval = False
break
else:
rval = True
return rval
return value in entry
return entry == value
def append(self, path, value):
'''append value to a list'''
try:
entry = Yedit.get_entry(self.yaml_dict, path, self.separator)
except KeyError:
entry = None
if entry is None:
self.put(path, [])
entry = Yedit.get_entry(self.yaml_dict, path, self.separator)
if not isinstance(entry, list):
return (False, self.yaml_dict)
# AUDIT:maybe-no-member makes sense due to loading data from
# a serialized format.
# pylint: disable=maybe-no-member
entry.append(value)
return (True, self.yaml_dict)
# pylint: disable=too-many-arguments
def update(self, path, value, index=None, curr_value=None):
''' put path, value into a dict '''
try:
entry = Yedit.get_entry(self.yaml_dict, path, self.separator)
except KeyError:
entry = None
if isinstance(entry, dict):
# AUDIT:maybe-no-member makes sense due to fuzzy types
# pylint: disable=maybe-no-member
if not isinstance(value, dict):
raise YeditException('Cannot replace key, value entry in dict with non-dict type. ' +
'value=[{}] type=[{}]'.format(value, type(value)))
entry.update(value)
return (True, self.yaml_dict)
elif isinstance(entry, list):
# AUDIT:maybe-no-member makes sense due to fuzzy types
# pylint: disable=maybe-no-member
ind = None
if curr_value:
try:
ind = entry.index(curr_value)
except ValueError:
return (False, self.yaml_dict)
elif index is not None:
ind = index
if ind is not None and entry[ind] != value:
entry[ind] = value
return (True, self.yaml_dict)
# see if it exists in the list
try:
ind = entry.index(value)
except ValueError:
# doesn't exist, append it
entry.append(value)
return (True, self.yaml_dict)
# already exists, return
if ind is not None:
return (False, self.yaml_dict)
return (False, self.yaml_dict)
def put(self, path, value):
''' put path, value into a dict '''
try:
entry = Yedit.get_entry(self.yaml_dict, path, self.separator)
except KeyError:
entry = None
if entry == value:
return (False, self.yaml_dict)
# deepcopy didn't work
# Try to use ruamel.yaml and fallback to pyyaml
try:
tmp_copy = yaml.load(yaml.round_trip_dump(self.yaml_dict,
default_flow_style=False),
yaml.RoundTripLoader)
except AttributeError:
tmp_copy = copy.deepcopy(self.yaml_dict)
# set the format attributes if available
try:
tmp_copy.fa.set_block_style()
except AttributeError:
pass
result = Yedit.add_entry(tmp_copy, path, value, self.separator)
if result is None:
return (False, self.yaml_dict)
# When path equals "" it is a special case.
# "" refers to the root of the document
# Only update the root path (entire document) when its a list or dict
if path == '':
if isinstance(result, list) or isinstance(result, dict):
self.yaml_dict = result
return (True, self.yaml_dict)
return (False, self.yaml_dict)
self.yaml_dict = tmp_copy
return (True, self.yaml_dict)
def create(self, path, value):
''' create a yaml file '''
if not self.file_exists():
# deepcopy didn't work
# Try to use ruamel.yaml and fallback to pyyaml
try:
tmp_copy = yaml.load(yaml.round_trip_dump(self.yaml_dict,
default_flow_style=False),
yaml.RoundTripLoader)
except AttributeError:
tmp_copy = copy.deepcopy(self.yaml_dict)
# set the format attributes if available
try:
tmp_copy.fa.set_block_style()
except AttributeError:
pass
result = Yedit.add_entry(tmp_copy, path, value, self.separator)
if result is not None:
self.yaml_dict = tmp_copy
return (True, self.yaml_dict)
return (False, self.yaml_dict)
@staticmethod
def get_curr_value(invalue, val_type):
'''return the current value'''
if invalue is None:
return None
curr_value = invalue
if val_type == 'yaml':
try:
# AUDIT:maybe-no-member makes sense due to different yaml libraries
# pylint: disable=maybe-no-member
                curr_value = yaml.load(invalue, yaml.RoundTripLoader)
except AttributeError:
curr_value = yaml.safe_load(invalue)
elif val_type == 'json':
curr_value = json.loads(invalue)
return curr_value
@staticmethod
def parse_value(inc_value, vtype=''):
'''determine value type passed'''
true_bools = ['y', 'Y', 'yes', 'Yes', 'YES', 'true', 'True', 'TRUE',
'on', 'On', 'ON', ]
false_bools = ['n', 'N', 'no', 'No', 'NO', 'false', 'False', 'FALSE',
'off', 'Off', 'OFF']
# It came in as a string but you didn't specify value_type as string
# we will convert to bool if it matches any of the above cases
if isinstance(inc_value, str) and 'bool' in vtype:
if inc_value not in true_bools and inc_value not in false_bools:
raise YeditException('Not a boolean type. str=[{}] vtype=[{}]'.format(inc_value, vtype))
elif isinstance(inc_value, bool) and 'str' in vtype:
inc_value = str(inc_value)
# There is a special case where '' will turn into None after yaml loading it so skip
if isinstance(inc_value, str) and inc_value == '':
pass
# If vtype is not str then go ahead and attempt to yaml load it.
elif isinstance(inc_value, str) and 'str' not in vtype:
try:
inc_value = yaml.safe_load(inc_value)
except Exception:
raise YeditException('Could not determine type of incoming value. ' +
'value=[{}] vtype=[{}]'.format(type(inc_value), vtype))
return inc_value
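    # Examples of parse_value() coercion (inputs are illustrative):
    #   Yedit.parse_value('yes', vtype='bool') -> True    (yaml maps 'yes' to a bool)
    #   Yedit.parse_value('42')                -> 42      (yaml-loaded when vtype is not str)
    #   Yedit.parse_value('42', vtype='str')   -> '42'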
@staticmethod
def process_edits(edits, yamlfile):
'''run through a list of edits and process them one-by-one'''
results = []
for edit in edits:
value = Yedit.parse_value(edit['value'], edit.get('value_type', ''))
if edit.get('action') == 'update':
# pylint: disable=line-too-long
curr_value = Yedit.get_curr_value(
Yedit.parse_value(edit.get('curr_value')),
edit.get('curr_value_format'))
rval = yamlfile.update(edit['key'],
value,
edit.get('index'),
curr_value)
elif edit.get('action') == 'append':
rval = yamlfile.append(edit['key'], value)
else:
rval = yamlfile.put(edit['key'], value)
if rval[0]:
results.append({'key': edit['key'], 'edit': rval[1]})
return {'changed': len(results) > 0, 'results': results}
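    # Sketch of an edits payload consumed above; keys and values are hypothetical:
    #   edits = [
    #       {'key': 'metadata.labels.app', 'value': 'registry'},                  # default action: put
    #       {'key': 'secrets', 'value': {'name': 'extra'}, 'action': 'append'},
    #   ]
    #   Yedit.process_edits(edits, yamlfile) -> {'changed': True, 'results': [...]}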
# pylint: disable=too-many-return-statements,too-many-branches
@staticmethod
def run_ansible(params):
'''perform the idempotent crud operations'''
yamlfile = Yedit(filename=params['src'],
backup=params['backup'],
content_type=params['content_type'],
separator=params['separator'])
state = params['state']
if params['src']:
rval = yamlfile.load()
if yamlfile.yaml_dict is None and state != 'present':
return {'failed': True,
'msg': 'Error opening file [{}]. Verify that the '.format(params['src']) +
                               'file exists, that it has correct permissions, and is valid yaml.'}
if state == 'list':
if params['content']:
content = Yedit.parse_value(params['content'], params['content_type'])
yamlfile.yaml_dict = content
if params['key']:
rval = yamlfile.get(params['key'])
return {'changed': False, 'result': rval, 'state': state}
elif state == 'absent':
if params['content']:
content = Yedit.parse_value(params['content'], params['content_type'])
yamlfile.yaml_dict = content
if params['update']:
rval = yamlfile.pop(params['key'], params['value'])
else:
rval = yamlfile.delete(params['key'], params['index'], params['value'])
if rval[0] and params['src']:
yamlfile.write()
return {'changed': rval[0], 'result': rval[1], 'state': state}
elif state == 'present':
# check if content is different than what is in the file
if params['content']:
content = Yedit.parse_value(params['content'], params['content_type'])
# We had no edits to make and the contents are the same
if yamlfile.yaml_dict == content and \
params['value'] is None:
return {'changed': False, 'result': yamlfile.yaml_dict, 'state': state}
yamlfile.yaml_dict = content
# If we were passed a key, value then
            # we encapsulate it in a list and process it
# Key, Value passed to the module : Converted to Edits list #
edits = []
_edit = {}
if params['value'] is not None:
_edit['value'] = params['value']
_edit['value_type'] = params['value_type']
_edit['key'] = params['key']
if params['update']:
_edit['action'] = 'update'
_edit['curr_value'] = params['curr_value']
_edit['curr_value_format'] = params['curr_value_format']
_edit['index'] = params['index']
elif params['append']:
_edit['action'] = 'append'
edits.append(_edit)
elif params['edits'] is not None:
edits = params['edits']
if edits:
results = Yedit.process_edits(edits, yamlfile)
# if there were changes and a src provided to us we need to write
if results['changed'] and params['src']:
yamlfile.write()
return {'changed': results['changed'], 'result': results['results'], 'state': state}
# no edits to make
if params['src']:
# pylint: disable=redefined-variable-type
rval = yamlfile.write()
return {'changed': rval[0],
'result': rval[1],
'state': state}
# We were passed content but no src, key or value, or edits. Return contents in memory
return {'changed': False, 'result': yamlfile.yaml_dict, 'state': state}
        return {'failed': True, 'msg': 'Unknown state passed'}
# -*- -*- -*- End included fragment: ../../lib_utils/src/class/yedit.py -*- -*- -*-
# -*- -*- -*- Begin included fragment: lib/base.py -*- -*- -*-
# pylint: disable=too-many-lines
# noqa: E301,E302,E303,T001
class OpenShiftCLIError(Exception):
'''Exception class for openshiftcli'''
pass
ADDITIONAL_PATH_LOOKUPS = ['/usr/local/bin', os.path.expanduser('~/bin')]
def locate_oc_binary():
''' Find and return oc binary file '''
# https://github.com/openshift/openshift-ansible/issues/3410
# oc can be in /usr/local/bin in some cases, but that may not
# be in $PATH due to ansible/sudo
paths = os.environ.get("PATH", os.defpath).split(os.pathsep) + ADDITIONAL_PATH_LOOKUPS
oc_binary = 'oc'
# Use shutil.which if it is available, otherwise fallback to a naive path search
try:
which_result = shutil.which(oc_binary, path=os.pathsep.join(paths))
if which_result is not None:
oc_binary = which_result
except AttributeError:
for path in paths:
if os.path.exists(os.path.join(path, oc_binary)):
oc_binary = os.path.join(path, oc_binary)
break
return oc_binary
# pylint: disable=too-few-public-methods
class OpenShiftCLI(object):
''' Class to wrap the command line tools '''
def __init__(self,
namespace,
kubeconfig='/etc/origin/master/admin.kubeconfig',
verbose=False,
all_namespaces=False):
''' Constructor for OpenshiftCLI '''
self.namespace = namespace
self.verbose = verbose
self.kubeconfig = Utils.create_tmpfile_copy(kubeconfig)
self.all_namespaces = all_namespaces
self.oc_binary = locate_oc_binary()
# Pylint allows only 5 arguments to be passed.
# pylint: disable=too-many-arguments
def _replace_content(self, resource, rname, content, edits=None, force=False, sep='.'):
''' replace the current object with the content '''
res = self._get(resource, rname)
if not res['results']:
return res
fname = Utils.create_tmpfile(rname + '-')
yed = Yedit(fname, res['results'][0], separator=sep)
updated = False
if content is not None:
changes = []
for key, value in content.items():
changes.append(yed.put(key, value))
if any([change[0] for change in changes]):
updated = True
elif edits is not None:
results = Yedit.process_edits(edits, yed)
if results['changed']:
updated = True
if updated:
yed.write()
atexit.register(Utils.cleanup, [fname])
return self._replace(fname, force)
return {'returncode': 0, 'updated': False}
def _replace(self, fname, force=False):
'''replace the current object with oc replace'''
# We are removing the 'resourceVersion' to handle
# a race condition when modifying oc objects
yed = Yedit(fname)
results = yed.delete('metadata.resourceVersion')
if results[0]:
yed.write()
cmd = ['replace', '-f', fname]
if force:
cmd.append('--force')
return self.openshift_cmd(cmd)
def _create_from_content(self, rname, content):
'''create a temporary file and then call oc create on it'''
fname = Utils.create_tmpfile(rname + '-')
yed = Yedit(fname, content=content)
yed.write()
atexit.register(Utils.cleanup, [fname])
return self._create(fname)
def _create(self, fname):
'''call oc create on a filename'''
return self.openshift_cmd(['create', '-f', fname])
def _delete(self, resource, name=None, selector=None):
'''call oc delete on a resource'''
cmd = ['delete', resource]
if selector is not None:
cmd.append('--selector={}'.format(selector))
elif name is not None:
cmd.append(name)
else:
raise OpenShiftCLIError('Either name or selector is required when calling delete.')
return self.openshift_cmd(cmd)
def _process(self, template_name, create=False, params=None, template_data=None): # noqa: E501
'''process a template
template_name: the name of the template to process
create: whether to send to oc create after processing
params: the parameters for the template
template_data: the incoming template's data; instead of a file
'''
cmd = ['process']
if template_data:
cmd.extend(['-f', '-'])
else:
cmd.append(template_name)
if params:
param_str = ["{}={}".format(key, str(value).replace("'", r'"')) for key, value in params.items()]
cmd.append('-p')
cmd.extend(param_str)
results = self.openshift_cmd(cmd, output=True, input_data=template_data)
if results['returncode'] != 0 or not create:
return results
fname = Utils.create_tmpfile(template_name + '-')
yed = Yedit(fname, results['results'])
yed.write()
atexit.register(Utils.cleanup, [fname])
return self.openshift_cmd(['create', '-f', fname])
def _get(self, resource, name=None, selector=None, field_selector=None):
'''return a resource by name '''
cmd = ['get', resource]
if selector is not None:
cmd.append('--selector={}'.format(selector))
if field_selector is not None:
cmd.append('--field-selector={}'.format(field_selector))
# Name cannot be used with selector or field_selector.
if selector is None and field_selector is None and name is not None:
cmd.append(name)
cmd.extend(['-o', 'json'])
rval = self.openshift_cmd(cmd, output=True)
        # Ensure results are returned in an array
if 'items' in rval:
rval['results'] = rval['items']
elif not isinstance(rval['results'], list):
rval['results'] = [rval['results']]
return rval
def _schedulable(self, node=None, selector=None, schedulable=True):
        ''' perform oadm manage-node schedulable '''
cmd = ['manage-node']
if node:
cmd.extend(node)
else:
cmd.append('--selector={}'.format(selector))
cmd.append('--schedulable={}'.format(schedulable))
return self.openshift_cmd(cmd, oadm=True, output=True, output_type='raw') # noqa: E501
def _list_pods(self, node=None, selector=None, pod_selector=None):
''' perform oadm list pods
node: the node in which to list pods
selector: the label selector filter if provided
pod_selector: the pod selector filter if provided
'''
cmd = ['manage-node']
if node:
cmd.extend(node)
else:
cmd.append('--selector={}'.format(selector))
if pod_selector:
cmd.append('--pod-selector={}'.format(pod_selector))
cmd.extend(['--list-pods', '-o', 'json'])
return self.openshift_cmd(cmd, oadm=True, output=True, output_type='raw')
# pylint: disable=too-many-arguments
def _evacuate(self, node=None, selector=None, pod_selector=None, dry_run=False, grace_period=None, force=False):
''' perform oadm manage-node evacuate '''
cmd = ['manage-node']
if node:
cmd.extend(node)
else:
cmd.append('--selector={}'.format(selector))
if dry_run:
cmd.append('--dry-run')
if pod_selector:
cmd.append('--pod-selector={}'.format(pod_selector))
if grace_period:
cmd.append('--grace-period={}'.format(int(grace_period)))
if force:
cmd.append('--force')
cmd.append('--evacuate')
return self.openshift_cmd(cmd, oadm=True, output=True, output_type='raw')
def _version(self):
''' return the openshift version'''
return self.openshift_cmd(['version'], output=True, output_type='raw')
def _import_image(self, url=None, name=None, tag=None):
''' perform image import '''
cmd = ['import-image']
image = '{0}'.format(name)
if tag:
image += ':{0}'.format(tag)
cmd.append(image)
if url:
cmd.append('--from={0}/{1}'.format(url, image))
cmd.append('-n{0}'.format(self.namespace))
cmd.append('--confirm')
return self.openshift_cmd(cmd)
def _run(self, cmds, input_data):
''' Actually executes the command. This makes mocking easier. '''
curr_env = os.environ.copy()
curr_env.update({'KUBECONFIG': self.kubeconfig})
proc = subprocess.Popen(cmds,
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
env=curr_env)
stdout, stderr = proc.communicate(input_data)
return proc.returncode, stdout.decode('utf-8'), stderr.decode('utf-8')
# pylint: disable=too-many-arguments,too-many-branches
def openshift_cmd(self, cmd, oadm=False, output=False, output_type='json', input_data=None):
'''Base command for oc '''
cmds = [self.oc_binary]
if oadm:
cmds.append('adm')
cmds.extend(cmd)
if self.all_namespaces:
cmds.extend(['--all-namespaces'])
        elif self.namespace is not None and self.namespace.lower() not in ['none', 'empty']: # E501
cmds.extend(['-n', self.namespace])
if self.verbose:
print(' '.join(cmds))
try:
returncode, stdout, stderr = self._run(cmds, input_data)
except OSError as ex:
returncode, stdout, stderr = 1, '', 'Failed to execute {}: {}'.format(subprocess.list2cmdline(cmds), ex)
rval = {"returncode": returncode,
"cmd": ' '.join(cmds)}
if output_type == 'json':
rval['results'] = {}
if output and stdout:
try:
rval['results'] = json.loads(stdout)
except ValueError as verr:
if "No JSON object could be decoded" in verr.args:
rval['err'] = verr.args
elif output_type == 'raw':
rval['results'] = stdout if output else ''
if self.verbose:
print("STDOUT: {0}".format(stdout))
print("STDERR: {0}".format(stderr))
if 'err' in rval or returncode != 0:
rval.update({"stderr": stderr,
"stdout": stdout})
return rval
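    # Hypothetical call sequence (requires a reachable cluster and a valid kubeconfig):
    #   cli = OpenShiftCLI('default')
    #   rval = cli.openshift_cmd(['get', 'pods'], output=True)
    #   rval['returncode'] == 0 and rval['results']   # parsed JSON on success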
class Utils(object): # pragma: no cover
''' utilities for openshiftcli modules '''
@staticmethod
def _write(filename, contents):
''' Actually write the file contents to disk. This helps with mocking. '''
with open(filename, 'w') as sfd:
sfd.write(str(contents))
@staticmethod
def create_tmp_file_from_contents(rname, data, ftype='yaml'):
''' create a file in tmp with name and contents'''
tmp = Utils.create_tmpfile(prefix=rname)
if ftype == 'yaml':
# AUDIT:no-member makes sense here due to ruamel.YAML/PyYAML usage
# pylint: disable=no-member
if hasattr(yaml, 'RoundTripDumper'):
Utils._write(tmp, yaml.dump(data, Dumper=yaml.RoundTripDumper))
else:
Utils._write(tmp, yaml.safe_dump(data, default_flow_style=False))
elif ftype == 'json':
Utils._write(tmp, json.dumps(data))
else:
Utils._write(tmp, data)
# Register cleanup when module is done
atexit.register(Utils.cleanup, [tmp])
return tmp
@staticmethod
def create_tmpfile_copy(inc_file):
'''create a temporary copy of a file'''
tmpfile = Utils.create_tmpfile('lib_openshift-')
Utils._write(tmpfile, open(inc_file).read())
# Cleanup the tmpfile
atexit.register(Utils.cleanup, [tmpfile])
return tmpfile
@staticmethod
def create_tmpfile(prefix='tmp'):
''' Generates and returns a temporary file name '''
with tempfile.NamedTemporaryFile(prefix=prefix, delete=False) as tmp:
return tmp.name
@staticmethod
def create_tmp_files_from_contents(content, content_type=None):
'''Turn an array of dict: filename, content into a files array'''
if not isinstance(content, list):
content = [content]
files = []
for item in content:
path = Utils.create_tmp_file_from_contents(item['path'] + '-',
item['data'],
ftype=content_type)
files.append({'name': os.path.basename(item['path']),
'path': path})
return files
@staticmethod
def cleanup(files):
'''Clean up on exit '''
for sfile in files:
if os.path.exists(sfile):
if os.path.isdir(sfile):
shutil.rmtree(sfile)
elif os.path.isfile(sfile):
os.remove(sfile)
@staticmethod
def exists(results, _name):
''' Check to see if the results include the name '''
if not results:
return False
if Utils.find_result(results, _name):
return True
return False
@staticmethod
def find_result(results, _name):
''' Find the specified result by name'''
rval = None
for result in results:
if 'metadata' in result and result['metadata']['name'] == _name:
rval = result
break
return rval
@staticmethod
def get_resource_file(sfile, sfile_type='yaml'):
''' return the service file '''
contents = None
with open(sfile) as sfd:
contents = sfd.read()
if sfile_type == 'yaml':
# AUDIT:no-member makes sense here due to ruamel.YAML/PyYAML usage
# pylint: disable=no-member
if hasattr(yaml, 'RoundTripLoader'):
contents = yaml.load(contents, yaml.RoundTripLoader)
else:
contents = yaml.safe_load(contents)
elif sfile_type == 'json':
contents = json.loads(contents)
return contents
@staticmethod
def filter_versions(stdout):
''' filter the oc version output '''
version_dict = {}
version_search = ['oc', 'openshift', 'kubernetes']
for line in stdout.strip().split('\n'):
for term in version_search:
if not line:
continue
if line.startswith(term):
version_dict[term] = line.split()[-1]
# horrible hack to get openshift version in Openshift 3.2
        # By default, "oc version" in 3.2 does not return an "openshift" version
if "openshift" not in version_dict:
version_dict["openshift"] = version_dict["oc"]
return version_dict
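    # Example: given hypothetical 'oc version' output such as
    #   oc v3.6.173.0.5
    #   kubernetes v1.6.1+5115d708d7
    # filter_versions() returns
    #   {'oc': 'v3.6.173.0.5', 'kubernetes': 'v1.6.1+5115d708d7', 'openshift': 'v3.6.173.0.5'}
    # (the 'openshift' entry is copied from 'oc' by the 3.2 fallback above).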
@staticmethod
def add_custom_versions(versions):
''' create custom versions strings '''
versions_dict = {}
for tech, version in versions.items():
# clean up "-" from version
if "-" in version:
version = version.split("-")[0]
if version.startswith('v'):
version = version[1:] # Remove the 'v' prefix
versions_dict[tech + '_numeric'] = version.split('+')[0]
# "3.3.0.33" is what we have, we want "3.3"
versions_dict[tech + '_short'] = "{}.{}".format(*version.split('.'))
return versions_dict
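    # Continuing the example above, add_custom_versions({'oc': 'v3.6.173.0.5'}) yields
    #   {'oc_numeric': '3.6.173.0.5', 'oc_short': '3.6'}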
@staticmethod
def openshift_installed():
''' check if openshift is installed '''
import rpm
transaction_set = rpm.TransactionSet()
rpmquery = transaction_set.dbMatch("name", "atomic-openshift")
return rpmquery.count() > 0
# Disabling too-many-branches. This is a yaml dictionary comparison function
# pylint: disable=too-many-branches,too-many-return-statements,too-many-statements
@staticmethod
def check_def_equal(user_def, result_def, skip_keys=None, debug=False):
''' Given a user defined definition, compare it with the results given back by our query. '''
# Currently these values are autogenerated and we do not need to check them
skip = ['metadata', 'status']
if skip_keys:
skip.extend(skip_keys)
for key, value in result_def.items():
if key in skip:
continue
# Both are lists
if isinstance(value, list):
if key not in user_def:
if debug:
print('User data does not have key [%s]' % key)
print('User data: %s' % user_def)
return False
if not isinstance(user_def[key], list):
if debug:
print('user_def[key] is not a list key=[%s] user_def[key]=%s' % (key, user_def[key]))
return False
if len(user_def[key]) != len(value):
if debug:
print("List lengths are not equal.")
print("key=[%s]: user_def[%s] != value[%s]" % (key, len(user_def[key]), len(value)))
print("user_def: %s" % user_def[key])
print("value: %s" % value)
return False
for values in zip(user_def[key], value):
if isinstance(values[0], dict) and isinstance(values[1], dict):
if debug:
print('sending list - list')
print(type(values[0]))
print(type(values[1]))
result = Utils.check_def_equal(values[0], values[1], skip_keys=skip_keys, debug=debug)
                        if not result:
                            if debug:
                                print('list compare returned false')
                            return False
elif value != user_def[key]:
if debug:
print('value should be identical')
print(user_def[key])
print(value)
return False
# recurse on a dictionary
elif isinstance(value, dict):
if key not in user_def:
if debug:
print("user_def does not have key [%s]" % key)
return False
if not isinstance(user_def[key], dict):
if debug:
print("dict returned false: not instance of dict")
return False
# before passing ensure keys match
api_values = set(value.keys()) - set(skip)
user_values = set(user_def[key].keys()) - set(skip)
if api_values != user_values:
if debug:
print("keys are not equal in dict")
print(user_values)
print(api_values)
return False
result = Utils.check_def_equal(user_def[key], value, skip_keys=skip_keys, debug=debug)
if not result:
if debug:
print("dict returned false")
print(result)
return False
# Verify each key, value pair is the same
else:
if key not in user_def or value != user_def[key]:
if debug:
print("value not equal; user_def does not have key")
print(key)
print(value)
if key in user_def:
print(user_def[key])
return False
if debug:
print('returning true')
return True
class OpenShiftCLIConfig(object):
'''Generic Config'''
def __init__(self, rname, namespace, kubeconfig, options):
self.kubeconfig = kubeconfig
self.name = rname
self.namespace = namespace
self._options = options
@property
def config_options(self):
''' return config options '''
return self._options
def to_option_list(self, ascommalist=''):
'''return all options as a string
if ascommalist is set to the name of a key, and
the value of that key is a dict, format the dict
as a list of comma delimited key=value pairs'''
return self.stringify(ascommalist)
def stringify(self, ascommalist=''):
''' return the options hash as cli params in a string
if ascommalist is set to the name of a key, and
the value of that key is a dict, format the dict
as a list of comma delimited key=value pairs '''
rval = []
for key in sorted(self.config_options.keys()):
data = self.config_options[key]
if data['include'] \
and (data['value'] is not None or isinstance(data['value'], int)):
if key == ascommalist:
val = ','.join(['{}={}'.format(kk, vv) for kk, vv in sorted(data['value'].items())])
else:
val = data['value']
rval.append('--{}={}'.format(key.replace('_', '-'), val))
return rval
# -*- -*- -*- End included fragment: lib/base.py -*- -*- -*-
# -*- -*- -*- Begin included fragment: lib/serviceaccount.py -*- -*- -*-
class ServiceAccountConfig(object):
'''Service account config class
This class stores the options and returns a default service account
'''
# pylint: disable=too-many-arguments
def __init__(self, sname, namespace, kubeconfig, secrets=None, image_pull_secrets=None):
self.name = sname
self.kubeconfig = kubeconfig
self.namespace = namespace
self.secrets = secrets or []
self.image_pull_secrets = image_pull_secrets or []
self.data = {}
self.create_dict()
def create_dict(self):
''' instantiate a properly structured volume '''
self.data['apiVersion'] = 'v1'
self.data['kind'] = 'ServiceAccount'
self.data['metadata'] = {}
self.data['metadata']['name'] = self.name
self.data['metadata']['namespace'] = self.namespace
self.data['secrets'] = []
if self.secrets:
for sec in self.secrets:
self.data['secrets'].append({"name": sec})
self.data['imagePullSecrets'] = []
if self.image_pull_secrets:
for sec in self.image_pull_secrets:
self.data['imagePullSecrets'].append({"name": sec})
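    # For example, ServiceAccountConfig('registry', 'default', kubeconfig,
    # secrets=['registry-secret']).data produces:
    #   {'apiVersion': 'v1', 'kind': 'ServiceAccount',
    #    'metadata': {'name': 'registry', 'namespace': 'default'},
    #    'secrets': [{'name': 'registry-secret'}], 'imagePullSecrets': []}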
class ServiceAccount(Yedit):
''' Class to wrap the oc command line tools '''
image_pull_secrets_path = "imagePullSecrets"
secrets_path = "secrets"
def __init__(self, content):
'''ServiceAccount constructor'''
super(ServiceAccount, self).__init__(content=content)
self._secrets = None
self._image_pull_secrets = None
@property
def image_pull_secrets(self):
''' property for image_pull_secrets '''
if self._image_pull_secrets is None:
self._image_pull_secrets = self.get(ServiceAccount.image_pull_secrets_path) or []
return self._image_pull_secrets
@image_pull_secrets.setter
def image_pull_secrets(self, secrets):
''' property for secrets '''
self._image_pull_secrets = secrets
@property
def secrets(self):
''' property for secrets '''
if not self._secrets:
self._secrets = self.get(ServiceAccount.secrets_path) or []
return self._secrets
@secrets.setter
def secrets(self, secrets):
''' property for secrets '''
self._secrets = secrets
def delete_secret(self, inc_secret):
''' remove a secret '''
remove_idx = None
for idx, sec in enumerate(self.secrets):
if sec['name'] == inc_secret:
remove_idx = idx
break
if remove_idx:
del self.secrets[remove_idx]
return True
return False
def delete_image_pull_secret(self, inc_secret):
''' remove a image_pull_secret '''
remove_idx = None
for idx, sec in enumerate(self.image_pull_secrets):
if sec['name'] == inc_secret:
remove_idx = idx
break
if remove_idx:
del self.image_pull_secrets[remove_idx]
return True
return False
def find_secret(self, inc_secret):
'''find secret'''
for secret in self.secrets:
if secret['name'] == inc_secret:
return secret
return None
def find_image_pull_secret(self, inc_secret):
'''find secret'''
for secret in self.image_pull_secrets:
if secret['name'] == inc_secret:
return secret
return None
def add_secret(self, inc_secret):
'''add secret'''
if self.secrets:
self.secrets.append({"name": inc_secret}) # pylint: disable=no-member
else:
self.put(ServiceAccount.secrets_path, [{"name": inc_secret}])
def add_image_pull_secret(self, inc_secret):
'''add image_pull_secret'''
if self.image_pull_secrets:
self.image_pull_secrets.append({"name": inc_secret}) # pylint: disable=no-member
else:
self.put(ServiceAccount.image_pull_secrets_path, [{"name": inc_secret}])
# -*- -*- -*- End included fragment: lib/serviceaccount.py -*- -*- -*-
# -*- -*- -*- Begin included fragment: class/oc_serviceaccount.py -*- -*- -*-
# pylint: disable=too-many-instance-attributes
class OCServiceAccount(OpenShiftCLI):
''' Class to wrap the oc command line tools '''
kind = 'sa'
# pylint allows 5
# pylint: disable=too-many-arguments
def __init__(self,
config,
verbose=False):
        ''' Constructor for OCServiceAccount '''
super(OCServiceAccount, self).__init__(config.namespace, kubeconfig=config.kubeconfig, verbose=verbose)
self.config = config
self.service_account = None
def exists(self):
        ''' return whether the service account exists '''
if self.service_account:
return True
return False
def get(self):
        '''return service account information '''
result = self._get(self.kind, self.config.name)
if result['returncode'] == 0:
self.service_account = ServiceAccount(content=result['results'][0])
elif '\"%s\" not found' % self.config.name in result['stderr']:
result['returncode'] = 0
result['results'] = [{}]
elif 'namespaces \"%s\" not found' % self.config.namespace in result['stderr']:
result['returncode'] = 0
result['results'] = [{}]
return result
def delete(self):
'''delete the object'''
return self._delete(self.kind, self.config.name)
def create(self):
'''create the object'''
return self._create_from_content(self.config.name, self.config.data)
def update(self):
'''update the object'''
# need to update the tls information and the service name
for secret in self.config.secrets:
result = self.service_account.find_secret(secret)
if not result:
self.service_account.add_secret(secret)
for secret in self.config.image_pull_secrets:
result = self.service_account.find_image_pull_secret(secret)
if not result:
self.service_account.add_image_pull_secret(secret)
return self._replace_content(self.kind, self.config.name, self.config.data)
def needs_update(self):
''' verify an update is needed '''
        # since creating a service account generates secrets and imagepullsecrets
# check_def_equal will not work
# Instead, verify all secrets passed are in the list
for secret in self.config.secrets:
result = self.service_account.find_secret(secret)
if not result:
return True
for secret in self.config.image_pull_secrets:
result = self.service_account.find_image_pull_secret(secret)
if not result:
return True
return False
@staticmethod
# pylint: disable=too-many-return-statements,too-many-branches
# TODO: This function should be refactored into its individual parts.
def run_ansible(params, check_mode):
'''run the ansible idempotent code'''
rconfig = ServiceAccountConfig(params['name'],
params['namespace'],
params['kubeconfig'],
params['secrets'],
params['image_pull_secrets'],
)
oc_sa = OCServiceAccount(rconfig,
verbose=params['debug'])
state = params['state']
api_rval = oc_sa.get()
#####
# Get
#####
if state == 'list':
return {'changed': False, 'results': api_rval['results'], 'state': 'list'}
########
# Delete
########
if state == 'absent':
if oc_sa.exists():
if check_mode:
return {'changed': True, 'msg': 'Would have performed a delete.'}
api_rval = oc_sa.delete()
return {'changed': True, 'results': api_rval, 'state': 'absent'}
return {'changed': False, 'state': 'absent'}
if state == 'present':
########
# Create
########
if not oc_sa.exists():
if check_mode:
return {'changed': True, 'msg': 'Would have performed a create.'}
# Create it here
api_rval = oc_sa.create()
if api_rval['returncode'] != 0:
return {'failed': True, 'msg': api_rval}
# return the created object
api_rval = oc_sa.get()
if api_rval['returncode'] != 0:
return {'failed': True, 'msg': api_rval}
return {'changed': True, 'results': api_rval, 'state': 'present'}
########
# Update
########
if oc_sa.needs_update():
api_rval = oc_sa.update()
if api_rval['returncode'] != 0:
return {'failed': True, 'msg': api_rval}
# return the created object
api_rval = oc_sa.get()
if api_rval['returncode'] != 0:
return {'failed': True, 'msg': api_rval}
return {'changed': True, 'results': api_rval, 'state': 'present'}
return {'changed': False, 'results': api_rval, 'state': 'present'}
return {'failed': True,
'changed': False,
'msg': 'Unknown state passed. %s' % state,
'state': 'unknown'}
# -*- -*- -*- End included fragment: class/oc_serviceaccount.py -*- -*- -*-
# -*- -*- -*- Begin included fragment: ansible/oc_serviceaccount.py -*- -*- -*-
def main():
'''
ansible oc module for service accounts
'''
module = AnsibleModule(
argument_spec=dict(
kubeconfig=dict(default='/etc/origin/master/admin.kubeconfig', type='str'),
state=dict(default='present', type='str',
choices=['present', 'absent', 'list']),
debug=dict(default=False, type='bool'),
name=dict(default=None, required=True, type='str'),
namespace=dict(default=None, required=True, type='str'),
secrets=dict(default=None, type='list'),
image_pull_secrets=dict(default=None, type='list'),
),
supports_check_mode=True,
)
rval = OCServiceAccount.run_ansible(module.params, module.check_mode)
if 'failed' in rval:
module.fail_json(**rval)
module.exit_json(**rval)
if __name__ == '__main__':
main()
# -*- -*- -*- End included fragment: ansible/oc_serviceaccount.py -*- -*- -*-
| blrm/openshift-tools | openshift/installer/vendored/openshift-ansible-3.9.40/roles/lib_openshift/library/oc_serviceaccount.py | Python | apache-2.0 | 61,243 |
# Testing sha module (NIST's Secure Hash Algorithm)
# use the three examples from Federal Information Processing Standards
# Publication 180-1, Secure Hash Standard, 1995 April 17
# http://www.itl.nist.gov/div897/pubs/fip180-1.htm
import sha
import unittest
from test import test_support
class SHATestCase(unittest.TestCase):
def check(self, data, digest):
computed = sha.new(data).hexdigest()
        self.assertEqual(computed, digest)
def test_case_1(self):
self.check("abc",
"a9993e364706816aba3e25717850c26c9cd0d89d")
def test_case_2(self):
self.check("abcdbcdecdefdefgefghfghighijhijkijkljklmklmnlmnomnopnopq",
"84983e441c3bd26ebaae4aa1f95129e5e54670f1")
def test_case_3(self):
self.check("a" * 1000000,
"34aa973cd4c4daa4f61eeb2bdbad27316534016f")
def test_main():
test_support.run_unittest(SHATestCase)
if __name__ == "__main__":
test_main()
| xbmc/atv2 | xbmc/lib/libPython/Python/Lib/test/test_sha.py | Python | gpl-2.0 | 971 |
#***************************************************************************
#* *
#* Copyright (c) 2011 *
#* Yorik van Havre <yorik@uncreated.net> *
#* *
#* This program is free software; you can redistribute it and/or modify *
#* it under the terms of the GNU Lesser General Public License (LGPL) *
#* as published by the Free Software Foundation; either version 2 of *
#* the License, or (at your option) any later version. *
#* for detail see the LICENCE text file. *
#* *
#* This program is distributed in the hope that it will be useful, *
#* but WITHOUT ANY WARRANTY; without even the implied warranty of *
#* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
#* GNU Library General Public License for more details. *
#* *
#* You should have received a copy of the GNU Library General Public *
#* License along with this program; if not, write to the Free Software *
#* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 *
#* USA *
#* *
#***************************************************************************
import FreeCAD,WorkingPlane,math,Draft,ArchCommands,DraftVecUtils,ArchComponent
from FreeCAD import Vector
if FreeCAD.GuiUp:
import FreeCADGui
from PySide import QtCore, QtGui
from DraftTools import translate
from pivy import coin
from PySide.QtCore import QT_TRANSLATE_NOOP
else:
# \cond
def translate(ctxt,txt):
return txt
def QT_TRANSLATE_NOOP(ctxt,txt):
return txt
# \endcond
## @package ArchSectionPlane
# \ingroup ARCH
# \brief The Section plane object and tools
#
# This module provides tools to build Section plane objects.
# It also contains functionality to produce SVG rendering of
# section planes, to be used in TechDraw and Drawing modules
def makeSectionPlane(objectslist=None,name="Section"):
"""makeSectionPlane([objectslist]) : Creates a Section plane objects including the
given objects. If no object is given, the whole document will be considered."""
obj = FreeCAD.ActiveDocument.addObject("App::FeaturePython",name)
obj.Label = translate("Arch",name)
_SectionPlane(obj)
if FreeCAD.GuiUp:
_ViewProviderSectionPlane(obj.ViewObject)
if objectslist:
g = []
for o in objectslist:
if o.isDerivedFrom("Part::Feature"):
g.append(o)
elif o.isDerivedFrom("App::DocumentObjectGroup"):
g.append(o)
obj.Objects = g
return obj
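# Typical interactive use, mirroring the GUI command below (an open document with a
# selection is assumed):
#   import Arch, FreeCADGui
#   section = Arch.makeSectionPlane(FreeCADGui.Selection.getSelection())
#   section.Placement = FreeCAD.DraftWorkingPlane.getPlacement()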
def makeSectionView(section,name="View"):
"""makeSectionView(section) : Creates a Drawing view of the given Section Plane
    in the active Page object (a new page will be created if none exists)"""
page = None
for o in FreeCAD.ActiveDocument.Objects:
if o.isDerivedFrom("Drawing::FeaturePage"):
page = o
break
if not page:
page = FreeCAD.ActiveDocument.addObject("Drawing::FeaturePage",translate("Arch","Page"))
page.Template = Draft.getParam("template",FreeCAD.getResourceDir()+'Mod/Drawing/Templates/A3_Landscape.svg')
view = FreeCAD.ActiveDocument.addObject("Drawing::FeatureViewPython",name)
page.addObject(view)
_ArchDrawingView(view)
view.Source = section
view.Label = translate("Arch","View of")+" "+section.Name
return view
def getCutShapes(objs,section,showHidden):
import Part,DraftGeomUtils
shapes = []
hshapes = []
sshapes = []
for o in objs:
if o.isDerivedFrom("Part::Feature"):
if o.Shape.isNull():
pass
elif section.OnlySolids:
if o.Shape.isValid():
shapes.extend(o.Shape.Solids)
else:
print(section.Label,": Skipping invalid object:",o.Label)
else:
shapes.append(o.Shape)
cutface,cutvolume,invcutvolume = ArchCommands.getCutVolume(section.Shape.copy(),shapes)
if cutvolume:
nsh = []
for sh in shapes:
for sol in sh.Solids:
if sol.Volume < 0:
sol.reverse()
c = sol.cut(cutvolume)
s = sol.section(cutface)
try:
wires = DraftGeomUtils.findWires(s.Edges)
for w in wires:
f = Part.Face(w)
sshapes.append(f)
#s = Part.Wire(s.Edges)
#s = Part.Face(s)
except Part.OCCError:
#print "ArchDrawingView: unable to get a face"
sshapes.append(s)
nsh.extend(c.Solids)
#sshapes.append(s)
if showHidden:
c = sol.cut(invcutvolume)
hshapes.append(c)
shapes = nsh
return shapes,hshapes,sshapes,cutface,cutvolume,invcutvolume
def getSVG(section,allOn=False,renderMode="Wireframe",showHidden=False,showFill=False,scale=1,linewidth=1,fontsize=1,techdraw=False,rotation=0):
"""getSVG(section,[allOn,renderMode,showHidden,showFill,scale,linewidth,fontsize]) :
returns an SVG fragment from an Arch section plane. If
allOn is True, all cut objects are shown, regardless if they are visible or not.
renderMode can be Wireframe (default) or Solid to use the Arch solid renderer. If
showHidden is True, the hidden geometry above the section plane is shown in dashed line.
If showFill is True, the cut areas get filled with a pattern"""
if not section.Objects:
return ""
import Part,DraftGeomUtils
p = FreeCAD.Placement(section.Placement)
direction = p.Rotation.multVec(FreeCAD.Vector(0,0,1))
objs = Draft.getGroupContents(section.Objects,walls=True,addgroups=True)
if not allOn:
objs = Draft.removeHidden(objs)
# separate spaces and Draft objects
spaces = []
nonspaces = []
drafts = []
windows = []
cutface = None
for o in objs:
if Draft.getType(o) == "Space":
spaces.append(o)
elif Draft.getType(o) in ["Dimension","Annotation"]:
drafts.append(o)
elif o.isDerivedFrom("Part::Part2DObject"):
drafts.append(o)
else:
nonspaces.append(o)
if Draft.getType(o) == "Window":
windows.append(o)
objs = nonspaces
svg = ''
fillpattern = '<pattern id="sectionfill" patternUnits="userSpaceOnUse" patternTransform="matrix(5,0,0,5,0,0)"'
fillpattern += ' x="0" y="0" width="10" height="10">'
fillpattern += '<g>'
fillpattern += '<rect width="10" height="10" style="stroke:none; fill:#ffffff" /><path style="stroke:#000000; stroke-width:1" d="M0,0 l10,10" /></g></pattern>'
# generating SVG
if renderMode in ["Solid",1]:
# render using the Arch Vector Renderer
import ArchVRM, WorkingPlane
wp = WorkingPlane.plane()
wp.setFromPlacement(section.Placement)
#wp.inverse()
render = ArchVRM.Renderer()
render.setWorkingPlane(wp)
render.addObjects(objs)
if showHidden:
render.cut(section.Shape,showHidden)
else:
render.cut(section.Shape)
svg += '<g transform="scale(1,-1)">\n'
svg += render.getViewSVG(linewidth="LWPlaceholder")
svg += fillpattern
svg += render.getSectionSVG(linewidth="SWPlaceholder",fillpattern="sectionfill")
if showHidden:
svg += render.getHiddenSVG(linewidth="LWPlaceholder")
svg += '</g>\n'
# print(render.info())
else:
# render using the Drawing module
import Drawing, Part
shapes,hshapes,sshapes,cutface,cutvolume,invcutvolume = getCutShapes(objs,section,showHidden)
if shapes:
baseshape = Part.makeCompound(shapes)
svgf = Drawing.projectToSVG(baseshape,direction)
if svgf:
svgf = svgf.replace('stroke-width="0.35"','stroke-width="LWPlaceholder"')
svgf = svgf.replace('stroke-width="1"','stroke-width="LWPlaceholder"')
svgf = svgf.replace('stroke-width:0.01','stroke-width:LWPlaceholder')
svg += svgf
if hshapes:
hshapes = Part.makeCompound(hshapes)
svgh = Drawing.projectToSVG(hshapes,direction)
if svgh:
svgh = svgh.replace('stroke-width="0.35"','stroke-width="LWPlaceholder"')
svgh = svgh.replace('stroke-width="1"','stroke-width="LWPlaceholder"')
svgh = svgh.replace('stroke-width:0.01','stroke-width:LWPlaceholder')
svgh = svgh.replace('fill="none"','fill="none"\nstroke-dasharray="DAPlaceholder"')
svg += svgh
if sshapes:
svgs = ""
if showFill:
#svgs += fillpattern
svgs += '<g transform="rotate(180)">\n'
for s in sshapes:
if s.Edges:
#f = Draft.getSVG(s,direction=direction.negative(),linewidth=0,fillstyle="sectionfill",color=(0,0,0))
# temporarily disabling fill patterns
f = Draft.getSVG(s,direction=direction.negative(),linewidth=0,fillstyle="#aaaaaa",color=(0,0,0))
svgs += f
svgs += "</g>\n"
sshapes = Part.makeCompound(sshapes)
svgs += Drawing.projectToSVG(sshapes,direction)
if svgs:
svgs = svgs.replace('stroke-width="0.35"','stroke-width="SWPlaceholder"')
svgs = svgs.replace('stroke-width="1"','stroke-width="SWPlaceholder"')
svgs = svgs.replace('stroke-width:0.01','stroke-width:SWPlaceholder')
svgs = svgs.replace('stroke-width="0.35 px"','stroke-width="SWPlaceholder"')
svgs = svgs.replace('stroke-width:0.35','stroke-width:SWPlaceholder')
svg += svgs
scaledlinewidth = linewidth/scale
st = FreeCAD.ParamGet("User parameter:BaseApp/Preferences/Mod/Arch").GetFloat("CutLineThickness",2)
yt = FreeCAD.ParamGet("User parameter:BaseApp/Preferences/Mod/Arch").GetFloat("SymbolLineThickness",0.6)
da = FreeCAD.ParamGet("User parameter:BaseApp/Preferences/Mod/Arch").GetString("archHiddenPattern","30,10")
da = da.replace(" ","")
svg = svg.replace('LWPlaceholder', str(scaledlinewidth) + 'px')
svg = svg.replace('SWPlaceholder', str(scaledlinewidth*st) + 'px')
svg = svg.replace('DAPlaceholder', str(da))
if drafts:
if not techdraw:
svg += '<g transform="scale(1,-1)">'
for d in drafts:
svg += Draft.getSVG(d,scale=scale,linewidth=linewidth*yt,fontsize=fontsize,direction=direction,techdraw=techdraw,rotation=rotation)
if not techdraw:
svg += '</g>'
# filter out spaces not cut by the section plane
if cutface and spaces:
spaces = [s for s in spaces if s.Shape.BoundBox.intersect(cutface.BoundBox)]
if spaces:
if not techdraw:
svg += '<g transform="scale(1,-1)">'
for s in spaces:
svg += Draft.getSVG(s,scale=scale,linewidth=linewidth*yt,fontsize=fontsize,direction=direction,techdraw=techdraw,rotation=rotation)
if not techdraw:
svg += '</g>'
# add additional edge symbols from windows
cutwindows = []
if cutface and windows:
cutwindows = [w.Name for w in windows if w.Shape.BoundBox.intersect(cutface.BoundBox)]
if windows:
sh = []
for w in windows:
if not hasattr(w.Proxy,"sshapes"):
w.Proxy.execute(w)
if hasattr(w.Proxy,"sshapes"):
if w.Proxy.sshapes and (w.Name in cutwindows):
c = Part.makeCompound(w.Proxy.sshapes)
c.Placement = w.Placement
sh.append(c)
# buggy for now...
#if hasattr(w.Proxy,"vshapes"):
# if w.Proxy.vshapes:
# c = Part.makeCompound(w.Proxy.vshapes)
# c.Placement = w.Placement
# sh.append(c)
if sh:
if not techdraw:
svg += '<g transform="scale(1,-1)">'
for s in sh:
svg += Draft.getSVG(s,scale=scale,linewidth=linewidth*yt,fontsize=fontsize,fillstyle="none",direction=direction,techdraw=techdraw,rotation=rotation)
if not techdraw:
svg += '</g>'
#print "complete node:",svg
return svg
def getDXF(obj):
"returns a DXF representation from a TechDraw/Drawing view"
allOn = True
if hasattr(obj,"AllOn"):
allOn = obj.AllOn
elif hasattr(obj,"AlwaysOn"):
allOn = obj.AlwaysOn
showHidden = False
if hasattr(obj,"showCut"):
showHidden = obj.showCut
elif hasattr(obj,"showHidden"):
showHidden = obj.showHidden
result = []
import Drawing,Part
if not obj.Source:
return result
section = obj.Source
if not section.Objects:
return result
p = FreeCAD.Placement(section.Placement)
direction = p.Rotation.multVec(FreeCAD.Vector(0,0,1))
objs = Draft.getGroupContents(section.Objects,walls=True,addgroups=True)
if not allOn:
objs = Draft.removeHidden(objs)
# separate spaces and Draft objects
spaces = []
nonspaces = []
drafts = []
objs = [o for o in objs if ((not(Draft.getType(o) in ["Space","Dimension","Annotation"])) and (not (o.isDerivedFrom("Part::Part2DObject"))))]
shapes,hshapes,sshapes,cutface,cutvolume,invcutvolume = getCutShapes(objs,section,showHidden)
if shapes:
result.append(Drawing.projectToDXF(Part.makeCompound(shapes),direction))
if sshapes:
result.append(Drawing.projectToDXF(Part.makeCompound(sshapes),direction))
if hshapes:
result.append(Drawing.projectToDXF(Part.makeCompound(hshapes),direction))
return result
class _CommandSectionPlane:
"the Arch SectionPlane command definition"
def GetResources(self):
return {'Pixmap' : 'Arch_SectionPlane',
'Accel': "S, E",
'MenuText': QT_TRANSLATE_NOOP("Arch_SectionPlane","Section Plane"),
'ToolTip': QT_TRANSLATE_NOOP("Arch_SectionPlane","Creates a section plane object, including the selected objects")}
def IsActive(self):
        return FreeCAD.ActiveDocument is not None
def Activated(self):
sel = FreeCADGui.Selection.getSelection()
ss = "["
for o in sel:
if len(ss) > 1:
ss += ","
ss += "FreeCAD.ActiveDocument."+o.Name
ss += "]"
FreeCAD.ActiveDocument.openTransaction(translate("Arch","Create Section Plane"))
FreeCADGui.addModule("Arch")
FreeCADGui.doCommand("section = Arch.makeSectionPlane("+ss+")")
FreeCADGui.doCommand("section.Placement = FreeCAD.DraftWorkingPlane.getPlacement()")
#FreeCADGui.doCommand("Arch.makeSectionView(section)")
FreeCAD.ActiveDocument.commitTransaction()
FreeCAD.ActiveDocument.recompute()
class _SectionPlane:
"A section plane object"
def __init__(self,obj):
obj.Proxy = self
obj.addProperty("App::PropertyPlacement","Placement","Base",QT_TRANSLATE_NOOP("App::Property","The placement of this object"))
obj.addProperty("Part::PropertyPartShape","Shape","Base","")
obj.addProperty("App::PropertyLinkList","Objects","Arch",QT_TRANSLATE_NOOP("App::Property","The objects that must be considered by this section plane. Empty means all document"))
obj.addProperty("App::PropertyBool","OnlySolids","Arch",QT_TRANSLATE_NOOP("App::Property","If false, non-solids will be cut too, with possible wrong results."))
obj.OnlySolids = True
self.Type = "SectionPlane"
def execute(self,obj):
import Part
if hasattr(obj.ViewObject,"DisplayLength"):
l = obj.ViewObject.DisplayLength.Value
h = obj.ViewObject.DisplayHeight.Value
elif hasattr(obj.ViewObject,"DisplaySize"):
# old objects
l = obj.ViewObject.DisplaySize.Value
h = obj.ViewObject.DisplaySize.Value
else:
l = 1
h = 1
        p = Part.makePlane(l,h,Vector(l/2,-h/2,0),Vector(0,0,-1))
# make sure the normal direction is pointing outwards, you never know what OCC will decide...
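        # Editor's note (descriptive): normalAt() may return either of the
        # face's two normals; if it deviates from the placement's +Z axis by
        # more than 1 radian (~57 deg), flip the face so it points outwards.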
if p.normalAt(0,0).getAngle(obj.Placement.Rotation.multVec(FreeCAD.Vector(0,0,1))) > 1:
p.reverse()
p.Placement = obj.Placement
obj.Shape = p
def onChanged(self,obj,prop):
pass
def getNormal(self,obj):
return obj.Shape.Faces[0].normalAt(0,0)
def __getstate__(self):
return self.Type
def __setstate__(self,state):
if state:
self.Type = state
class _ViewProviderSectionPlane:
"A View Provider for Section Planes"
def __init__(self,vobj):
vobj.addProperty("App::PropertyLength","DisplayLength","Arch",QT_TRANSLATE_NOOP("App::Property","The display length of this section plane"))
vobj.addProperty("App::PropertyLength","DisplayHeight","Arch",QT_TRANSLATE_NOOP("App::Property","The display height of this section plane"))
vobj.addProperty("App::PropertyLength","ArrowSize","Arch",QT_TRANSLATE_NOOP("App::Property","The size of the arrows of this section plane"))
vobj.addProperty("App::PropertyPercent","Transparency","Base","")
vobj.addProperty("App::PropertyFloat","LineWidth","Base","")
vobj.addProperty("App::PropertyColor","LineColor","Base","")
vobj.addProperty("App::PropertyBool","CutView","Arch",QT_TRANSLATE_NOOP("App::Property","Show the cut in the 3D view"))
vobj.DisplayLength = 1000
vobj.DisplayHeight = 1000
vobj.ArrowSize = 50
vobj.Transparency = 85
vobj.LineWidth = 1
vobj.LineColor = (0.0,0.0,0.4,1.0)
vobj.CutView = False
vobj.Proxy = self
self.Object = vobj.Object
def getIcon(self):
import Arch_rc
return ":/icons/Arch_SectionPlane_Tree.svg"
def claimChildren(self):
return []
def attach(self,vobj):
self.clip = None
self.mat1 = coin.SoMaterial()
self.mat2 = coin.SoMaterial()
self.fcoords = coin.SoCoordinate3()
#fs = coin.SoType.fromName("SoBrepFaceSet").createInstance() # this causes a FreeCAD freeze for me
fs = coin.SoIndexedFaceSet()
fs.coordIndex.setValues(0,7,[0,1,2,-1,0,2,3])
self.drawstyle = coin.SoDrawStyle()
self.drawstyle.style = coin.SoDrawStyle.LINES
self.lcoords = coin.SoCoordinate3()
ls = coin.SoType.fromName("SoBrepEdgeSet").createInstance()
ls.coordIndex.setValues(0,57,[0,1,-1,2,3,4,5,-1,6,7,8,9,-1,10,11,-1,12,13,14,15,-1,16,17,18,19,-1,20,21,-1,22,23,24,25,-1,26,27,28,29,-1,30,31,-1,32,33,34,35,-1,36,37,38,39,-1,40,41,42,43,44])
sep = coin.SoSeparator()
psep = coin.SoSeparator()
fsep = coin.SoSeparator()
fsep.addChild(self.mat2)
fsep.addChild(self.fcoords)
fsep.addChild(fs)
psep.addChild(self.mat1)
psep.addChild(self.drawstyle)
psep.addChild(self.lcoords)
psep.addChild(ls)
sep.addChild(fsep)
sep.addChild(psep)
vobj.addDisplayMode(sep,"Default")
self.onChanged(vobj,"DisplayLength")
self.onChanged(vobj,"LineColor")
self.onChanged(vobj,"Transparency")
self.onChanged(vobj,"CutView")
def getDisplayModes(self,vobj):
return ["Default"]
def getDefaultDisplayMode(self):
return "Default"
def setDisplayMode(self,mode):
return mode
def updateData(self,obj,prop):
if prop in ["Placement"]:
self.onChanged(obj.ViewObject,"DisplayLength")
self.onChanged(obj.ViewObject,"CutView")
return
def onChanged(self,vobj,prop):
if prop == "LineColor":
l = vobj.LineColor
self.mat1.diffuseColor.setValue([l[0],l[1],l[2]])
self.mat2.diffuseColor.setValue([l[0],l[1],l[2]])
elif prop == "Transparency":
if hasattr(vobj,"Transparency"):
self.mat2.transparency.setValue(vobj.Transparency/100.0)
elif prop in ["DisplayLength","DisplayHeight","ArrowSize"]:
if hasattr(vobj,"DisplayLength"):
ld = vobj.DisplayLength.Value/2
hd = vobj.DisplayHeight.Value/2
elif hasattr(vobj,"DisplaySize"):
# old objects
ld = vobj.DisplaySize.Value/2
hd = vobj.DisplaySize.Value/2
else:
ld = 1
hd = 1
verts = []
fverts = []
for v in [[-ld,-hd],[ld,-hd],[ld,hd],[-ld,hd]]:
if hasattr(vobj,"ArrowSize"):
l1 = vobj.ArrowSize.Value if vobj.ArrowSize.Value > 0 else 0.1
else:
l1 = 0.1
l2 = l1/3
pl = FreeCAD.Placement(vobj.Object.Placement)
p1 = pl.multVec(Vector(v[0],v[1],0))
p2 = pl.multVec(Vector(v[0],v[1],-l1))
p3 = pl.multVec(Vector(v[0]-l2,v[1],-l1+l2))
p4 = pl.multVec(Vector(v[0]+l2,v[1],-l1+l2))
p5 = pl.multVec(Vector(v[0],v[1]-l2,-l1+l2))
p6 = pl.multVec(Vector(v[0],v[1]+l2,-l1+l2))
verts.extend([[p1.x,p1.y,p1.z],[p2.x,p2.y,p2.z]])
fverts.append([p1.x,p1.y,p1.z])
verts.extend([[p2.x,p2.y,p2.z],[p3.x,p3.y,p3.z],[p4.x,p4.y,p4.z],[p2.x,p2.y,p2.z]])
verts.extend([[p2.x,p2.y,p2.z],[p5.x,p5.y,p5.z],[p6.x,p6.y,p6.z],[p2.x,p2.y,p2.z]])
verts.extend(fverts+[fverts[0]])
self.lcoords.point.setValues(verts)
self.fcoords.point.setValues(fverts)
elif prop == "LineWidth":
self.drawstyle.lineWidth = vobj.LineWidth
elif prop == "CutView":
if hasattr(vobj,"CutView") and FreeCADGui.ActiveDocument.ActiveView:
sg = FreeCADGui.ActiveDocument.ActiveView.getSceneGraph()
if vobj.CutView:
if self.clip:
sg.removeChild(self.clip)
self.clip = None
for o in Draft.getGroupContents(vobj.Object.Objects,walls=True):
if hasattr(o.ViewObject,"Lighting"):
o.ViewObject.Lighting = "One side"
self.clip = coin.SoClipPlane()
self.clip.on.setValue(True)
norm = vobj.Object.Proxy.getNormal(vobj.Object)
mp = vobj.Object.Shape.CenterOfMass
mp = DraftVecUtils.project(mp,norm)
dist = mp.Length #- 0.1 # to not clip exactly on the section object
norm = norm.negative()
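                    # Editor's note (descriptive): SbPlane takes a signed
                    # offset along the clip normal. Flip the sign (with a 1
                    # unit margin) when the projected center lies on the
                    # opposite side; otherwise pull back by 0.1 so the clip
                    # plane does not coincide exactly with the section plane.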
if mp.getAngle(norm) > 1:
dist += 1
dist = -dist
else:
dist -= 0.1
plane = coin.SbPlane(coin.SbVec3f(norm.x,norm.y,norm.z),dist)
self.clip.plane.setValue(plane)
sg.insertChild(self.clip,0)
else:
if self.clip:
sg.removeChild(self.clip)
self.clip = None
return
def __getstate__(self):
return None
def __setstate__(self,state):
return None
def setEdit(self,vobj,mode):
taskd = SectionPlaneTaskPanel()
taskd.obj = vobj.Object
taskd.update()
FreeCADGui.Control.showDialog(taskd)
return True
def unsetEdit(self,vobj,mode):
FreeCADGui.Control.closeDialog()
return False
def doubleClicked(self,vobj):
self.setEdit(vobj,None)
class _ArchDrawingView:
def __init__(self, obj):
obj.addProperty("App::PropertyLink","Source","Base",QT_TRANSLATE_NOOP("App::Property","The linked object"))
obj.addProperty("App::PropertyEnumeration","RenderingMode","Drawing view",QT_TRANSLATE_NOOP("App::Property","The rendering mode to use"))
obj.addProperty("App::PropertyBool","ShowCut","Drawing view",QT_TRANSLATE_NOOP("App::Property","If cut geometry is shown or not"))
obj.addProperty("App::PropertyBool","ShowFill","Drawing view",QT_TRANSLATE_NOOP("App::Property","If cut geometry is filled or not"))
obj.addProperty("App::PropertyFloat","LineWidth","Drawing view",QT_TRANSLATE_NOOP("App::Property","The line width of the rendered objects"))
obj.addProperty("App::PropertyLength","FontSize","Drawing view",QT_TRANSLATE_NOOP("App::Property","The size of the texts inside this object"))
obj.addProperty("App::PropertyBool","AlwaysOn","Drawing view",QT_TRANSLATE_NOOP("App::Property","If checked, source objects are displayed regardless of being visible in the 3D model"))
obj.RenderingMode = ["Solid","Wireframe"]
obj.RenderingMode = "Wireframe"
obj.LineWidth = 0.35
obj.ShowCut = False
obj.Proxy = self
self.Type = "ArchSectionView"
obj.FontSize = 12
def execute(self, obj):
if hasattr(obj,"Source"):
if obj.Source:
if hasattr(obj,"AlwaysOn"):
a = obj.AlwaysOn
else:
a = False
svgbody = getSVG(obj.Source,a,obj.RenderingMode,obj.ShowCut,obj.ShowFill,obj.Scale,obj.LineWidth,obj.FontSize)
if svgbody:
result = '<g id="' + obj.Name + '"'
result += ' transform="'
result += 'rotate('+str(obj.Rotation)+','+str(obj.X)+','+str(obj.Y)+') '
result += 'translate('+str(obj.X)+','+str(obj.Y)+') '
result += 'scale('+str(obj.Scale)+','+str(obj.Scale)+')'
result += '">\n'
result += svgbody
result += '</g>\n'
obj.ViewResult = result
def __getstate__(self):
return self.Type
def __setstate__(self,state):
if state:
self.Type = state
def getDisplayModes(self,vobj):
modes=["Default"]
return modes
def setDisplayMode(self,mode):
return mode
def getDXF(self,obj):
"returns a DXF representation of the view"
if obj.RenderingMode == "Solid":
print("Unable to get DXF from Solid mode: ",obj.Label)
return ""
result = []
import Drawing
if not hasattr(self,"baseshape"):
self.onChanged(obj,"Source")
if hasattr(self,"baseshape"):
if self.baseshape:
result.append(Drawing.projectToDXF(self.baseshape,self.direction))
if hasattr(self,"sectionshape"):
if self.sectionshape:
result.append(Drawing.projectToDXF(self.sectionshape,self.direction))
if hasattr(self,"hiddenshape"):
if self.hiddenshape:
result.append(Drawing.projectToDXF(self.hiddenshape,self.direction))
return result
class SectionPlaneTaskPanel:
    '''A TaskPanel for the section plane object'''
def __init__(self):
# the panel has a tree widget that contains categories
# for the subcomponents, such as additions, subtractions.
# the categories are shown only if they are not empty.
self.obj = None
self.form = QtGui.QWidget()
self.form.setObjectName("TaskPanel")
self.grid = QtGui.QGridLayout(self.form)
self.grid.setObjectName("grid")
self.title = QtGui.QLabel(self.form)
self.grid.addWidget(self.title, 0, 0, 1, 2)
# tree
self.tree = QtGui.QTreeWidget(self.form)
self.grid.addWidget(self.tree, 1, 0, 1, 2)
self.tree.setColumnCount(1)
self.tree.header().hide()
# buttons
self.addButton = QtGui.QPushButton(self.form)
self.addButton.setObjectName("addButton")
self.addButton.setIcon(QtGui.QIcon(":/icons/Arch_Add.svg"))
self.grid.addWidget(self.addButton, 3, 0, 1, 1)
self.delButton = QtGui.QPushButton(self.form)
self.delButton.setObjectName("delButton")
self.delButton.setIcon(QtGui.QIcon(":/icons/Arch_Remove.svg"))
self.grid.addWidget(self.delButton, 3, 1, 1, 1)
QtCore.QObject.connect(self.addButton, QtCore.SIGNAL("clicked()"), self.addElement)
QtCore.QObject.connect(self.delButton, QtCore.SIGNAL("clicked()"), self.removeElement)
self.update()
def isAllowedAlterSelection(self):
return True
def isAllowedAlterView(self):
return True
def getStandardButtons(self):
return int(QtGui.QDialogButtonBox.Ok)
def getIcon(self,obj):
if hasattr(obj.ViewObject,"Proxy"):
return QtGui.QIcon(obj.ViewObject.Proxy.getIcon())
elif obj.isDerivedFrom("Sketcher::SketchObject"):
return QtGui.QIcon(":/icons/Sketcher_Sketch.svg")
elif obj.isDerivedFrom("App::DocumentObjectGroup"):
return QtGui.QApplication.style().standardIcon(QtGui.QStyle.SP_DirIcon)
else:
return QtGui.QIcon(":/icons/Tree_Part.svg")
def update(self):
'fills the treewidget'
self.tree.clear()
if self.obj:
for o in self.obj.Objects:
item = QtGui.QTreeWidgetItem(self.tree)
item.setText(0,o.Label)
item.setToolTip(0,o.Name)
item.setIcon(0,self.getIcon(o))
self.retranslateUi(self.form)
def addElement(self):
if self.obj:
for o in FreeCADGui.Selection.getSelection():
ArchComponent.addToComponent(self.obj,o,"Objects")
self.update()
def removeElement(self):
if self.obj:
it = self.tree.currentItem()
if it:
comp = FreeCAD.ActiveDocument.getObject(str(it.toolTip(0)))
ArchComponent.removeFromComponent(self.obj,comp)
self.update()
def accept(self):
FreeCAD.ActiveDocument.recompute()
FreeCADGui.ActiveDocument.resetEdit()
return True
def retranslateUi(self, TaskPanel):
TaskPanel.setWindowTitle(QtGui.QApplication.translate("Arch", "Objects", None))
self.delButton.setText(QtGui.QApplication.translate("Arch", "Remove", None))
self.addButton.setText(QtGui.QApplication.translate("Arch", "Add", None))
self.title.setText(QtGui.QApplication.translate("Arch", "Objects seen by this section plane", None))
if FreeCAD.GuiUp:
FreeCADGui.addCommand('Arch_SectionPlane',_CommandSectionPlane())
| bblacey/FreeCAD-MacOS-CI | src/Mod/Arch/ArchSectionPlane.py | Python | lgpl-2.1 | 31,579 |
'''Document model'''
# Copyright 2013 Christopher Foo <chris.foo@gmail.com>
# Licensed under GPLv3. See COPYING.txt for details.
from .binary import *
from .block import *
from .common import *
from .field import *
from .record import *
from .warc import *
| chfoo/warcat | warcat/model/__init__.py | Python | gpl-3.0 | 257 |
"""
Copyright (c) 2009 Simon Hofer
Permission is hereby granted, free of charge, to any person
obtaining a copy of this software and associated documentation
files (the "Software"), to deal in the Software without
restriction, including without limitation the rights to use,
copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following
conditions:
The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
OTHER DEALINGS IN THE SOFTWARE.
"""
#import modules required by application
import objc
import Foundation
import AppKit
from PyObjCTools import AppHelper
# import modules containing classes required to start application
import ReaderController
# pass control to AppKit
AppHelper.runEventLoop()
| saeimn/ESReader | Source/main.py | Python | mit | 1,325 |
# coding=utf-8
__author__ = 'kohlmannj'
import os
import Ity
from Ity.Utilities.FilePaths import get_files_in_path, get_valid_path
class Corpus(object):
def __init__(
self,
path,
name=None,
extensions=(".txt",),
texts_path=None,
metadata_path=None,
output_path=None
):
# Main Corpus Path
if type(path) is not str:
raise ValueError("Invalid path argument provided.")
# If we didn't get an absolute path, assume it's a path relative to Ity.corpus_root.
if not os.path.isabs(path):
path = os.path.join(Ity.corpus_root, path)
# This call to os.path.abspath(), among other things, removes trailing
# slashes from the path.
self.path = os.path.abspath(path)
# Okay, does the path actually exist?
if not os.path.exists(self.path):
raise IOError("Corpus at path '%s' does not exist." % self.path)
# Texts Path
self.texts_path = get_valid_path(
path=texts_path,
relative_path_base=self.path,
fallback_path=self.path
)
# It's NOT okay if this path doesn't exist.
if type(self.texts_path) is not str or not os.path.exists(self.texts_path):
raise ValueError("Path to texts ('%s') doesn't exist." % self.texts_path)
# Corpus Name
if name is None or type(name) is not str:
name = os.path.basename(self.path)
self.name = name
# Metadata Path
self.metadata_path = get_valid_path(
path=metadata_path,
relative_path_base=self.path,
fallback_path=os.path.join(Ity.metadata_root, self.name)
)
# Output Path
self.output_path = get_valid_path(
path=output_path,
relative_path_base=self.path,
fallback_path=os.path.join(Ity.output_root, self.name)
)
# Extensions
if extensions is None or type(extensions) is str or len(extensions) == 0:
raise ValueError("Invalid extensions argument provided.")
self.extensions = extensions
self._texts = None
self.metadata = {}
self.batch_format_data = {}
@property
def texts(self):
if self._texts is None:
self._texts = get_files_in_path(self.texts_path, self.extensions)
return self._texts
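# Illustrative usage sketch (editor's note; the corpus name is hypothetical):
#
#   corpus = Corpus(path="blog_posts", extensions=(".txt",))
#   for text_path in corpus.texts:  # lazily globbed on first access
#       process(text_path)
#
# Relative paths resolve against Ity.corpus_root; metadata and output paths
# fall back to Ity.metadata_root/<name> and Ity.output_root/<name>.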
| uwgraphics/Ubiqu-Ity | Ity/Utilities/Corpus.py | Python | bsd-2-clause | 2,420 |
# (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
from ansible import constants as C
from ansible.errors import AnsibleError, AnsibleParserError
from ansible.playbook.attribute import Attribute, FieldAttribute
#from ansible.utils.display import deprecated
class Become:
    # Privilege escalation
_become = FieldAttribute(isa='bool')
_become_method = FieldAttribute(isa='string')
_become_user = FieldAttribute(isa='string')
_become_pass = FieldAttribute(isa='string')
def __init__(self):
return super(Become, self).__init__()
def _detect_privilege_escalation_conflict(self, ds):
# Fail out if user specifies conflicting privilege escalations
        has_become = 'become' in ds or 'become_user' in ds
has_sudo = 'sudo' in ds or 'sudo_user' in ds
has_su = 'su' in ds or 'su_user' in ds
if has_become:
msg = 'The become params ("become", "become_user") and'
if has_sudo:
raise AnsibleParserError('%s sudo params ("sudo", "sudo_user") cannot be used together' % msg)
elif has_su:
raise AnsibleParserError('%s su params ("su", "su_user") cannot be used together' % msg)
elif has_sudo and has_su:
raise AnsibleParserError('sudo params ("sudo", "sudo_user") and su params ("su", "su_user") cannot be used together')
def _preprocess_data_become(self, ds):
"""Preprocess the playbook data for become attributes
This is called from the Base object's preprocess_data() method which
in turn is called pretty much anytime any sort of playbook object
(plays, tasks, blocks, etc) are created.
"""
self._detect_privilege_escalation_conflict(ds)
# Setting user implies setting become/sudo/su to true
if 'become_user' in ds and not ds.get('become', False):
ds['become'] = True
# Privilege escalation, backwards compatibility for sudo/su
if 'sudo' in ds or 'sudo_user' in ds:
ds['become_method'] = 'sudo'
if 'sudo' in ds:
ds['become'] = ds['sudo']
del ds['sudo']
else:
ds['become'] = True
if 'sudo_user' in ds:
ds['become_user'] = ds['sudo_user']
del ds['sudo_user']
#deprecated("Instead of sudo/sudo_user, use become/become_user and set become_method to 'sudo' (default)")
elif 'su' in ds or 'su_user' in ds:
ds['become_method'] = 'su'
if 'su' in ds:
ds['become'] = ds['su']
del ds['su']
else:
ds['become'] = True
if 'su_user' in ds:
ds['become_user'] = ds['su_user']
del ds['su_user']
#deprecated("Instead of su/su_user, use become/become_user and set become_method to 'su' (default is sudo)")
# if we are becoming someone else, but some fields are unset,
# make sure they're initialized to the default config values
if ds.get('become', False):
if ds.get('become_method', None) is None:
ds['become_method'] = C.DEFAULT_BECOME_METHOD
if ds.get('become_user', None) is None:
ds['become_user'] = C.DEFAULT_BECOME_USER
return ds
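    # Illustrative example (editor's note): legacy sudo keys are rewritten to
    # their become equivalents, e.g. ds = {'sudo': True, 'sudo_user': 'root'}
    # becomes {'become': True, 'become_method': 'sudo', 'become_user': 'root'}
    # (with config defaults filled in for any field still unset).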
def _get_attr_become(self):
'''
Override for the 'become' getattr fetcher, used from Base.
'''
if hasattr(self, '_get_parent_attribute'):
return self._get_parent_attribute('become')
else:
return self._attributes['become']
def _get_attr_become_method(self):
'''
Override for the 'become_method' getattr fetcher, used from Base.
'''
if hasattr(self, '_get_parent_attribute'):
return self._get_parent_attribute('become_method')
else:
return self._attributes['become_method']
def _get_attr_become_user(self):
'''
Override for the 'become_user' getattr fetcher, used from Base.
'''
if hasattr(self, '_get_parent_attribute'):
return self._get_parent_attribute('become_user')
else:
return self._attributes['become_user']
def _get_attr_become_password(self):
'''
Override for the 'become_password' getattr fetcher, used from Base.
'''
if hasattr(self, '_get_parent_attribute'):
return self._get_parent_attribute('become_password')
else:
return self._attributes['become_password']
| dguerri/ansible | lib/ansible/playbook/become.py | Python | gpl-3.0 | 5,422 |
#!/usr/bin/python
# -*- coding: utf-8 -*-
import flask
from flask import Flask, render_template
from flask_googlemaps import GoogleMaps
from flask_googlemaps import Map
from flask_googlemaps import icons
import os
import codecs
import re
import sys
import struct
import json
import requests
import argparse
import getpass
import threading
import werkzeug.serving
import pokemon_pb2
import time
import httplib
import urllib
import types
import numpy as np
from google.protobuf.internal import encoder
from google.protobuf.message import DecodeError
from s2sphere import *
from datetime import datetime, timedelta
from geopy.geocoders import GoogleV3
from gpsoauth import perform_master_login, perform_oauth
from geopy.exc import GeocoderTimedOut, GeocoderServiceError
from requests.packages.urllib3.exceptions import InsecureRequestWarning
from requests.adapters import ConnectionError
from requests.models import InvalidURL
from transform import *
import functools
import math
from math import radians, cos, sin, asin, sqrt
from argparse import Namespace
requests.packages.urllib3.disable_warnings(InsecureRequestWarning)
API_URL = 'https://pgorelease.nianticlabs.com/plfe/rpc'
LOGIN_URL = \
'https://sso.pokemon.com/sso/login?service=https://sso.pokemon.com/sso/oauth2.0/callbackAuthorize'
LOGIN_OAUTH = 'https://sso.pokemon.com/sso/oauth2.0/accessToken'
APP = 'com.nianticlabs.pokemongo'
with open('credentials.json') as file:
credentials = json.load(file)
PTC_CLIENT_SECRET = credentials.get('ptc_client_secret', None)
ANDROID_ID = credentials.get('android_id', None)
SERVICE = credentials.get('service', None)
CLIENT_SIG = credentials.get('client_sig', None)
GOOGLEMAPS_KEY = credentials.get('gmaps_key', None)
SESSION = requests.session()
SESSION.headers.update({'User-Agent': 'Niantic App'})
SESSION.verify = False
global_password = None
global_token = None
access_token = None
DEBUG = True
VERBOSE_DEBUG = False # if you want to write raw request/response to the console
COORDS_LATITUDE = 0
COORDS_LONGITUDE = 0
COORDS_ALTITUDE = 0
FLOAT_LAT = 0
FLOAT_LONG = 0
NEXT_LAT = 0
NEXT_LONG = 0
auto_refresh = 0
default_step = 0.001
api_endpoint = None
pokemons = {}
gyms = {}
pokestops = {}
numbertoteam = { # At least I'm pretty sure that's it. I could be wrong and then I'd be displaying the wrong owner team of gyms.
0: 'Gym',
1: 'Mystic',
2: 'Valor',
3: 'Instinct',
}
origin_lat, origin_lon = None, None
is_ampm_clock = False
spotted_pokemon = {}
max_idle_time = timedelta(seconds=300)
api_last_response = datetime.now()
wait_to_reconnect = 60
first_connection = True
api_endpoint = None
access_token = None
profile_response = None
# stuff for in-background search thread
search_thread = None
class memoized(object):
"""Decorator that caches a function's return value each time it is called.
If called later with the same arguments, the cached value is returned, and
not re-evaluated.
"""
def __init__(self, func):
self.func = func
self.cache = {}
def __call__(self, *args):
try:
return self.cache[args]
except KeyError:
value = self.func(*args)
self.cache[args] = value
return value
except TypeError:
# uncachable -- for instance, passing a list as an argument.
# Better to not cache than to blow up entirely.
return self.func(*args)
def __repr__(self):
"""Return the function's docstring."""
return self.func.__doc__
def __get__(self, obj, objtype):
"""Support instance methods."""
fn = functools.partial(self.__call__, obj)
fn.reset = self._reset
return fn
def _reset(self):
self.cache = {}
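# Illustrative usage sketch (editor's note, not part of the original module;
# `slow_square` is hypothetical):
#
#   @memoized
#   def slow_square(x):
#       time.sleep(1)
#       return x * x
#
#   slow_square(4)  # computed once
#   slow_square(4)  # served from self.cache
#
# When used on a method (as with connection.login below), __get__ binds the
# call and also exposes .reset(), which clears the cache to force a re-login.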
def parse_unicode(bytestring):
decoded_string = bytestring.decode(sys.getfilesystemencoding())
return decoded_string
def debug(message):
if DEBUG:
print '[-] {}'.format(message)
def time_left(ms):
s = ms / 1000
(m, s) = divmod(s, 60)
(h, m) = divmod(m, 60)
return (h, m, s)
def lonlat_to_meters(lat1, lon1, lat2, lon2):
"""
Calculate the great circle distance between two points
on the earth (specified in decimal degrees)
"""
# convert decimal degrees to radians
lon1, lat1, lon2, lat2 = map(radians, [lon1, lat1, lon2, lat2])
# haversine formula
dlon = lon2 - lon1
dlat = lat2 - lat1
a = sin(dlat/2)**2 + cos(lat1) * cos(lat2) * sin(dlon/2)**2
c = 2 * asin(sqrt(a))
# earth radius in meters: 6378100
m = 6378100 * c
return m
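# Sanity check (editor's note, illustrative): one degree of longitude along
# the equator spans 6378100 * pi / 180 ~= 111,319 m with the radius used
# above, so lonlat_to_meters(0, 0, 0, 1) should return roughly 111319.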
def bearing_degrees(lat1, lon1, lat2, lon2):
"""
Convert location in bearing degrees to be able to give a direction of where the Pokemon is located.
:param lat1: user location latitude
:param lon1: user location longitude
:param lat2: pokemon location latitude
:param lon2: pokemon location longitude
:return: bearing degrees
"""
# convert decimal degrees to radians
lon1, lat1, lon2, lat2 = map(radians, [lon1, lat1, lon2, lat2])
# calculate the angle
dlon = lon2 - lon1
dlat = lat2 - lat1
x = math.sin(dlon) * math.cos(lat2)
y = math.cos(lat1) * math.sin(lat2) - (math.sin(lat1) * math.cos(lat2) * math.cos(dlon))
initial_bearing = math.atan2(x, y)
initial_bearing = math.degrees(initial_bearing)
bearing = (initial_bearing + 360) % 360
bearing = int(bearing)
return bearing
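# Sanity check (editor's note, illustrative): for two points on the equator
# with the second due east of the first, dlat = 0 gives y = 0 while
# x = sin(dlon) > 0, so atan2(x, y) = pi/2 and the function returns 90.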
def bearing_degrees_to_compass_direction(bearing):
"""
Converts bearing degrees in easy to read North-East-West-South direction
:param bearing: bearing in degrees
:return: North, Northeast, East, etc
"""
if bearing >= 0 and bearing < 23:
direction = 'north'
elif bearing >= 23 and bearing < 68:
direction = 'northeast'
elif bearing >= 68 and bearing < 113:
direction = 'east'
elif bearing >= 113 and bearing < 158:
direction = 'southeast'
elif bearing >= 158 and bearing < 203:
direction = 'south'
elif bearing >= 203 and bearing < 248:
direction = 'southwest'
elif bearing >= 248 and bearing < 293:
direction = 'west'
elif bearing >= 293 and bearing < 338:
direction = 'northwest'
elif bearing >= 338 and bearing <= 360:
direction = 'north'
return direction
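# Examples (editor's note, illustrative):
#   bearing_degrees_to_compass_direction(90)  -> 'east'
#   bearing_degrees_to_compass_direction(225) -> 'southwest'
#   bearing_degrees_to_compass_direction(350) -> 'north'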
def encode(cellid):
output = []
encoder._VarintEncoder()(output.append, cellid)
return ''.join(output)
def getNeighbors():
origin = CellId.from_lat_lng(LatLng.from_degrees(FLOAT_LAT,
FLOAT_LONG)).parent(15)
walk = [origin.id()]
# 10 before and 10 after
next = origin.next()
prev = origin.prev()
for i in range(10):
walk.append(prev.id())
walk.append(next.id())
next = next.next()
prev = prev.prev()
return walk
def f2i(float):
return struct.unpack('<Q', struct.pack('<d', float))[0]
def f2h(float):
return hex(struct.unpack('<Q', struct.pack('<d', float))[0])
def h2f(hex):
return struct.unpack('<d', struct.pack('<Q', int(hex, 16)))[0]
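# Editor's note (illustrative): f2i/f2h expose the raw IEEE-754 bit pattern
# of a double as an unsigned 64-bit integer/hex string, and h2f inverts f2h,
# so h2f(f2h(x)) == x for any float x. The coordinates sent to the API are
# encoded this way (see set_location_coords) rather than as plain floats.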
def strtr(s, repl):
pattern = '|'.join(map(re.escape, sorted(repl, key=len, reverse=True)))
return re.sub(pattern, lambda m: repl[m.group()], s)
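# Example (editor's note, illustrative): replacements are applied longest key
# first in a single pass, mirroring PHP's strtr():
#   strtr('#{name}: #{n}', {'#{name}': 'Pidgey', '#{n}': '5'}) -> 'Pidgey: 5'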
def merge(x,y):
# store a copy of x, but overwrite with y's values where applicable
merged = dict(x,**y)
xkeys = x.keys()
# if the value of merged[key] was overwritten with y[key]'s value
# then we need to put back any missing x[key] values
for key in xkeys:
# if this key is a dictionary, recurse
if type(x[key]) is types.DictType and y.has_key(key):
merged[key] = merge(x[key],y[key])
return merged
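# Example (editor's note, illustrative): nested dict values are merged
# recursively instead of being replaced wholesale:
#   merge({'a': 1, 'b': {'x': 1, 'y': 2}}, {'b': {'y': 3}})
#   -> {'a': 1, 'b': {'x': 1, 'y': 3}}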
def retrying_set_location(location_name):
"""
Continue trying to get co-ords from Google Location until we have them
:param location_name: string to pass to Location API
:return: None
"""
while True:
try:
set_location(location_name)
return
except (GeocoderTimedOut, GeocoderServiceError), e:
debug(
'retrying_set_location: geocoder exception ({}), retrying'.format(
str(e)))
time.sleep(1.25)
def set_location(location_name):
geolocator = GoogleV3()
prog = re.compile('^(\-?\d+(\.\d+)?),\s*(\-?\d+(\.\d+)?)$')
global origin_lat
global origin_lon
if prog.match(location_name):
local_lat, local_lng = [float(x) for x in location_name.split(",")]
alt = 0
origin_lat, origin_lon = local_lat, local_lng
else:
loc = geolocator.geocode(location_name)
origin_lat, origin_lon = local_lat, local_lng = loc.latitude, loc.longitude
alt = loc.altitude
print '[!] Your given location: {}'.format(loc.address.encode('utf-8'))
print('[!] lat/long/alt: {} {} {}'.format(local_lat, local_lng, alt))
set_location_coords(local_lat, local_lng, alt)
def set_location_coords(lat, long, alt):
global COORDS_LATITUDE, COORDS_LONGITUDE, COORDS_ALTITUDE
global FLOAT_LAT, FLOAT_LONG
FLOAT_LAT = lat
FLOAT_LONG = long
COORDS_LATITUDE = f2i(lat) # 0x4042bd7c00000000 # f2i(lat)
COORDS_LONGITUDE = f2i(long) # 0xc05e8aae40000000 #f2i(long)
COORDS_ALTITUDE = f2i(alt)
def get_location_coords():
return (COORDS_LATITUDE, COORDS_LONGITUDE, COORDS_ALTITUDE)
def retrying_api_req(service, api_endpoint, access_token, *args, **kwargs):
while True:
try:
response = api_req(service, api_endpoint, access_token, *args,
**kwargs)
if response:
return response
debug('retrying_api_req: api_req returned None, retrying')
except (InvalidURL, ConnectionError, DecodeError), e:
debug('retrying_api_req: request error ({}), retrying'.format(
str(e)))
time.sleep(1)
def api_req(service, api_endpoint, access_token, *args, **kwargs):
p_req = pokemon_pb2.RequestEnvelop()
p_req.rpc_id = 1469378659230941192
p_req.unknown1 = 2
(p_req.latitude, p_req.longitude, p_req.altitude) = \
get_location_coords()
p_req.unknown12 = 989
if 'useauth' not in kwargs or not kwargs['useauth']:
p_req.auth.provider = service
p_req.auth.token.contents = access_token
p_req.auth.token.unknown13 = 14
else:
p_req.unknown11.unknown71 = kwargs['useauth'].unknown71
p_req.unknown11.unknown72 = kwargs['useauth'].unknown72
p_req.unknown11.unknown73 = kwargs['useauth'].unknown73
for arg in args:
p_req.MergeFrom(arg)
protobuf = p_req.SerializeToString()
r = SESSION.post(api_endpoint, data=protobuf, verify=False)
p_ret = pokemon_pb2.ResponseEnvelop()
p_ret.ParseFromString(r.content)
if VERBOSE_DEBUG:
print 'REQUEST:'
print p_req
print 'RESPONSE:'
print p_ret
print '''
'''
time.sleep(0.51)
return p_ret
def get_api_endpoint(service, access_token, api=API_URL):
profile_response = None
attempt_get_api_endpoint = 1
while not profile_response and attempt_get_api_endpoint <= 5:
profile_response = retrying_get_profile(service, access_token, api,
None)
if not hasattr(profile_response, 'api_url'):
debug(
'get_api_endpoint: retrying_get_profile: get_profile returned no api_url, retrying '
+ str(attempt_get_api_endpoint))
profile_response = None
attempt_get_api_endpoint += 1
continue
if not len(profile_response.api_url):
debug(
'get_api_endpoint: retrying_get_profile returned no-len api_url, retrying'
+ str(attempt_get_api_endpoint))
profile_response = None
attempt_get_api_endpoint += 1
if profile_response:
api_endpoint = 'https://%s/rpc' % profile_response.api_url
else:
api_endpoint = None
return api_endpoint
def retrying_get_profile(service, access_token, api, useauth, *reqq):
profile_response = None
attempt_get_profile = 1
while not profile_response and attempt_get_profile <= 5:
profile_response = get_profile(service, access_token, api, useauth,
*reqq)
if not hasattr(profile_response, 'payload'):
debug(
'retrying_get_profile: get_profile returned no payload, retrying ' + str(attempt_get_profile))
profile_response = None
attempt_get_profile += 1
continue
if not profile_response.payload:
debug(
'retrying_get_profile: get_profile returned no-len payload, retrying ' + str(attempt_get_profile))
profile_response = None
attempt_get_profile += 1
return profile_response
def get_profile(service, access_token, api, useauth, *reqq):
req = pokemon_pb2.RequestEnvelop()
req1 = req.requests.add()
req1.type = 2
if len(reqq) >= 1:
req1.MergeFrom(reqq[0])
req2 = req.requests.add()
req2.type = 126
if len(reqq) >= 2:
req2.MergeFrom(reqq[1])
req3 = req.requests.add()
req3.type = 4
if len(reqq) >= 3:
req3.MergeFrom(reqq[2])
req4 = req.requests.add()
req4.type = 129
if len(reqq) >= 4:
req4.MergeFrom(reqq[3])
req5 = req.requests.add()
req5.type = 5
if len(reqq) >= 5:
req5.MergeFrom(reqq[4])
return retrying_api_req(service, api, access_token, req, useauth=useauth)
def login_google(username, password):
print '[!] Google login for: {}'.format(username)
r1 = perform_master_login(username, password, ANDROID_ID)
r2 = perform_oauth(username,
r1.get('Token', ''),
ANDROID_ID,
SERVICE,
APP,
CLIENT_SIG, )
return r2.get('Auth')
def login_ptc(username, password):
print '[!] PTC login for: {}'.format(username)
head = {'User-Agent': 'Niantic App'}
r = SESSION.get(LOGIN_URL, headers=head)
    if r is None:
        # bail out with None so callers fall into their retry path instead of
        # receiving a rendered HTML page
        return None
try:
jdata = json.loads(r.content)
except ValueError, e:
debug('login_ptc: could not decode JSON from {}'.format(r.content))
return None
# Maximum password length is 15 (sign in page enforces this limit, API does not)
if len(password) > 15:
print '[!] Trimming password to 15 characters'
password = password[:15]
data = {
'lt': jdata['lt'],
'execution': jdata['execution'],
'_eventId': 'submit',
'username': username,
'password': password,
}
r1 = SESSION.post(LOGIN_URL, data=data, headers=head)
ticket = None
try:
ticket = re.sub('.*ticket=', '', r1.history[0].headers['Location'])
except Exception, e:
if DEBUG:
print r1.json()['errors'][0]
return None
data1 = {
'client_id': 'mobile-app_pokemon-go',
'redirect_uri': 'https://www.nianticlabs.com/pokemongo/error',
'client_secret': PTC_CLIENT_SECRET,
'grant_type': 'refresh_token',
'code': ticket,
}
r2 = SESSION.post(LOGIN_OAUTH, data=data1)
access_token = re.sub('&expires.*', '', r2.content)
access_token = re.sub('.*access_token=', '', access_token)
return access_token
def get_heartbeat(service,
api_endpoint,
access_token,
response, ):
m4 = pokemon_pb2.RequestEnvelop.Requests()
m = pokemon_pb2.RequestEnvelop.MessageSingleInt()
m.f1 = int(time.time() * 1000)
m4.message = m.SerializeToString()
m5 = pokemon_pb2.RequestEnvelop.Requests()
m = pokemon_pb2.RequestEnvelop.MessageSingleString()
m.bytes = '05daf51635c82611d1aac95c0b051d3ec088a930'
m5.message = m.SerializeToString()
walk = sorted(getNeighbors())
m1 = pokemon_pb2.RequestEnvelop.Requests()
m1.type = 106
m = pokemon_pb2.RequestEnvelop.MessageQuad()
m.f1 = ''.join(map(encode, walk))
m.f2 = \
"\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000"
m.lat = COORDS_LATITUDE
m.long = COORDS_LONGITUDE
m1.message = m.SerializeToString()
response = get_profile(service,
access_token,
api_endpoint,
response.unknown7,
m1,
pokemon_pb2.RequestEnvelop.Requests(),
m4,
pokemon_pb2.RequestEnvelop.Requests(),
m5, )
try:
payload = response.payload[0]
except (AttributeError, IndexError):
return
heartbeat = pokemon_pb2.ResponseEnvelop.HeartbeatPayload()
heartbeat.ParseFromString(payload)
return heartbeat
def get_token(service, username, password):
"""
Get token if it's not None
:return:
:rtype:
"""
global global_token
if True: # global_token is None:
if service == 'ptc':
global_token = login_ptc(username, password)
else:
global_token = login_google(username, password)
return global_token
else:
return global_token
def send_to_slack(text, username, icon_emoji, webhook):
    # Build the payload with json.dumps so quotes, backslashes and newlines
    # in the alert text are escaped properly instead of corrupting the JSON.
    values = {'payload': json.dumps({'username': username,
                                     'icon_emoji': icon_emoji,
                                     'text': text})}
str_values = {}
for k, v in values.items():
str_values[k] = unicode(v).encode('utf-8')
data = urllib.urlencode(str_values)
h = httplib.HTTPSConnection('hooks.slack.com')
headers = {"Content-type": "application/x-www-form-urlencoded", "Accept": "text/plain"}
h.request('POST', webhook, data, headers)
r = h.getresponse()
# ack = r.read()
# print data
# print ack
def get_args():
parser = argparse.ArgumentParser()
parser.add_argument(
'-a', '--auth_service', type=str.lower, help='Auth Service', default='ptc')
parser.add_argument('-u', '--username', help='Username', required=True)
parser.add_argument('-p', '--password', help='Password', required=False)
parser.add_argument(
'-l', '--location', type=parse_unicode, help='Location', required=True)
parser.add_argument('-st', '--step-limit', help='Steps', required=True)
parser.add_argument('-sw', '--slack-webhook', help='slack webhook urlpath /services/.../../...', required=True)
parser.add_argument('-r', '--range', help='max range of pokemon for notifactions in meters', required=True)
group = parser.add_mutually_exclusive_group(required=False)
group.add_argument(
'-i', '--ignore', help='Comma-separated list of Pokémon names or IDs to ignore')
group.add_argument(
'-o', '--only', help='Comma-separated list of Pokémon names or IDs to search')
parser.add_argument(
"-ar",
"--auto_refresh",
help="Enables an autorefresh that behaves the same as a page reload. " +
"Needs an integer value for the amount of seconds")
parser.add_argument(
'-pi',
'--pokemon-icons',
help='If you have pokemon emojis in Slack, you can give the prefix here, e.g.: \':\' if your emojis are named' +
':pokename:, \':pokemon-:\' if they are named :pokemon-pokename:. :pokeball: is default.',
default=':pokeball:')
parser.add_argument(
'-dp',
'--display-pokestop',
help='Display pokéstop',
action='store_true',
default=False)
parser.add_argument(
'-dg',
'--display-gym',
help='Display Gym',
action='store_true',
default=False)
parser.add_argument(
'-H',
'--host',
help='Set web server listening host',
default='127.0.0.1')
parser.add_argument(
'-P',
'--port',
type=int,
help='Set web server listening port',
default=5000)
parser.add_argument(
"-L",
"--locale",
help="Locale for Pokemon names: default en, check locale folder for more options",
default="en")
parser.add_argument(
"-iL",
"--icon-locale",
help="Locale for Pokemon icons: default en, check locale folder for more options",
default="en")
parser.add_argument(
"-ol",
"--onlylure",
help='Display only lured pokéstop',
action='store_true')
parser.add_argument(
'-c',
'--china',
help='Coordinates transformer for China',
action='store_true')
parser.add_argument(
"-pm",
"--ampm_clock",
help="Toggles the AM/PM clock for Pokemon timers",
action='store_true',
default=False)
parser.add_argument(
'-d', '--debug', help='Debug Mode', action='store_true')
parser.set_defaults(DEBUG=True)
return parser.parse_args()
class connection:
@memoized
def login(self, args):
global global_password
if not global_password:
if args.password:
global_password = args.password
else:
global_password = getpass.getpass()
profile_response = None
while profile_response is None or not profile_response.payload:
access_token = get_token(args.auth_service, args.username, global_password)
if access_token is None:
print '[-] access_token is None: wrong username/password?'
time.sleep(wait_to_reconnect)
continue
print '[+] RPC Session Token: {} ...'.format(access_token[:25])
api_endpoint = get_api_endpoint(args.auth_service, access_token)
if api_endpoint is None:
print '[-] No response from RPC server'
time.sleep(wait_to_reconnect)
continue
print '[+] Received API endpoint: {}'.format(api_endpoint)
profile_response = retrying_get_profile(args.auth_service, access_token,
api_endpoint, None)
if profile_response is None or not profile_response.payload:
print 'Could not get profile, retrying connecting'
time.sleep(wait_to_reconnect)
print '[+] Login successful'
payload = profile_response.payload[0]
profile = pokemon_pb2.ResponseEnvelop.ProfilePayload()
profile.ParseFromString(payload)
print '[+] Username: {}'.format(profile.profile.username)
creation_time = \
datetime.fromtimestamp(int(profile.profile.creation_time)
/ 1000)
print '[+] You started playing Pokemon Go on: {}'.format(
creation_time.strftime('%Y-%m-%d %H:%M:%S'))
for curr in profile.profile.currency:
print '[+] {}: {}'.format(curr.type, curr.amount)
return api_endpoint, access_token, profile_response
def main():
full_path = os.path.realpath(__file__)
(path, filename) = os.path.split(full_path)
args = get_args()
if args.auth_service not in ['ptc', 'google']:
print '[!] Invalid Auth service specified'
return
print('[+] Locale is ' + args.locale + ' and icon locale is ' + args.icon_locale)
pokemonsJSON = json.load(
codecs.open(path + '/locales/pokemon.' + args.locale + '.json', "r", 'UTF-8'))
pokemonsJSON_icons = json.load(
codecs.open(path + '/locales/pokemon.' + args.icon_locale + '.json', "r", 'UTF-8'))
translationsJSON = json.load(
codecs.open(path + '/locales/translations.en.json', "r", 'UTF-8'))
if os.path.isfile(path + '/locales/translations.' + args.locale + '.json'):
overrideTranslationsJSON = json.load(
            codecs.open(path + '/locales/translations.' + args.locale + '.json', "r", 'UTF-8'))
translationsJSON = merge(translationsJSON, overrideTranslationsJSON)
if args.debug:
global DEBUG
DEBUG = True
print '[!] DEBUG mode on'
# only get location for first run
if not (FLOAT_LAT and FLOAT_LONG):
print('[+] Getting initial location')
retrying_set_location(args.location)
if args.auto_refresh:
global auto_refresh
auto_refresh = int(args.auto_refresh) * 1000
if args.ampm_clock:
global is_ampm_clock
is_ampm_clock = True
global api_last_response, first_connection, api_endpoint, access_token, profile_response
if first_connection:
print '[+] Connecting'
api_endpoint, access_token, profile_response = connection.login(args)
api_last_response = datetime.now()
first_connection = False
elif datetime.now() - api_last_response > max_idle_time and args.auth_service == 'google':
print '[!] Resetting connection...'
connection.login.reset()
time.sleep(wait_to_reconnect)
api_endpoint, access_token, profile_response = connection.login(args)
api_last_response = datetime.now()
clear_stale_pokemons()
steplimit = int(args.step_limit)
global max_distance
max_distance = int(args.range)
global slack_webhook_urlpath
slack_webhook_urlpath = str(args.slack_webhook)
global pokemon_icons_prefix
if args.pokemon_icons:
pokemon_icons_prefix = args.pokemon_icons
else:
pokemon_icons_prefix = False
ignore = []
only = []
if args.ignore:
ignore = [i.lower().strip() for i in args.ignore.split(',')]
elif args.only:
only = [i.lower().strip() for i in args.only.split(',')]
pos = 1
x = 0
y = 0
dx = 0
dy = -1
steplimit2 = steplimit**2
for step in range(steplimit2):
#starting at 0 index
debug('looping: step {} of {}'.format((step+1), steplimit**2))
#debug('steplimit: {} x: {} y: {} pos: {} dx: {} dy {}'.format(steplimit2, x, y, pos, dx, dy))
# Scan location math
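        # Editor's note (descriptive): the walk below traces an outward square
        # spiral around the origin. (x, y) starts at the center and the
        # direction (dx, dy) rotates 90 degrees at each corner of the current
        # ring, so steplimit**2 steps cover a square grid of scan positions
        # spaced 0.0025 degrees apart.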
if -steplimit2 / 2 < x <= steplimit2 / 2 and -steplimit2 / 2 < y <= steplimit2 / 2:
set_location_coords(x * 0.0025 + origin_lat, y * 0.0025 + origin_lon, 0)
if x == y or x < 0 and x == -y or x > 0 and x == 1 - y:
(dx, dy) = (-dy, dx)
(x, y) = (x + dx, y + dy)
process_step(args, api_endpoint, access_token, profile_response,
pokemonsJSON, pokemonsJSON_icons, translationsJSON, ignore, only)
print('Completed: ' + str(
((step+1) + pos * .25 - .25) / (steplimit2) * 100) + '%')
global NEXT_LAT, NEXT_LONG
if (NEXT_LAT and NEXT_LONG and
(NEXT_LAT != FLOAT_LAT or NEXT_LONG != FLOAT_LONG)):
print('Update to next location %f, %f' % (NEXT_LAT, NEXT_LONG))
set_location_coords(NEXT_LAT, NEXT_LONG, 0)
NEXT_LAT = 0
NEXT_LONG = 0
else:
set_location_coords(origin_lat, origin_lon, 0)
register_background_thread()
def process_step(args, api_endpoint, access_token, profile_response,
pokemonsJSON, pokemonsJSON_icons, translationsJSON, ignore, only):
print('[+] Searching for Pokemon at location {} {}'.format(FLOAT_LAT, FLOAT_LONG))
origin = LatLng.from_degrees(FLOAT_LAT, FLOAT_LONG)
step_lat = FLOAT_LAT
step_long = FLOAT_LONG
parent = CellId.from_lat_lng(LatLng.from_degrees(FLOAT_LAT,
FLOAT_LONG)).parent(15)
h = get_heartbeat(args.auth_service, api_endpoint, access_token,
profile_response)
hs = [h]
seen = {}
for child in parent.children():
latlng = LatLng.from_point(Cell(child).get_center())
set_location_coords(latlng.lat().degrees, latlng.lng().degrees, 0)
hs.append(
get_heartbeat(args.auth_service, api_endpoint, access_token,
profile_response))
set_location_coords(step_lat, step_long, 0)
visible = []
for hh in hs:
try:
for cell in hh.cells:
for wild in cell.WildPokemon:
hash = wild.SpawnPointId
disappear_timestamp = time.time() + wild.TimeTillHiddenMs / 1000
if hash not in seen.keys() or \
wild.pokemon.PokemonId != seen[hash]['PokemonId'] \
or seen[hash]['disappear_timestamp'] < disappear_timestamp - 300:
visible.append(wild)
seen[hash] = {'disappear_timestamp': disappear_timestamp,
'PokemonId': wild.pokemon.PokemonId}
if cell.Fort:
for Fort in cell.Fort:
                        if Fort.Enabled:
if args.china:
(Fort.Latitude, Fort.Longitude) = \
transform_from_wgs_to_gcj(Location(Fort.Latitude, Fort.Longitude))
if Fort.GymPoints and args.display_gym:
gyms[Fort.FortId] = [Fort.Team, Fort.Latitude,
Fort.Longitude, Fort.GymPoints]
elif Fort.FortType:
expire_time = 0
if Fort.LureInfo.LureExpiresTimestampMs:
if Fort.LureInfo.ActivePokemonId:
hash = Fort.LureInfo.FortId
disappear_timestamp = Fort.LureInfo.LureExpiresTimestampMs / 1000
                                    if hash not in seen.keys() \
                                        or Fort.LureInfo.ActivePokemonId != seen[hash]['PokemonId'] \
                                        or seen[hash]['disappear_timestamp'] < disappear_timestamp - 300:
lured = Namespace()
lured.lured = True
lured.pokemon = Namespace()
lured.pokemon.PokemonId = Fort.LureInfo.ActivePokemonId
lured.Latitude = Fort.Latitude + 0.00007 * np.random.normal()
lured.Longitude = Fort.Longitude + 0.00007 * np.random.normal()
lured.SpawnPointId = 'Fort_' + Fort.LureInfo.FortId
lured.TimeTillHiddenMs = Fort.LureInfo.LureExpiresTimestampMs - \
time.time() * 1000
visible.append(lured)
seen[hash] = {'disappear_timestamp': disappear_timestamp,
'PokemonId': Fort.LureInfo.ActivePokemonId}
expire_time = datetime\
.fromtimestamp(Fort.LureInfo.LureExpiresTimestampMs / 1000.0)\
.strftime("%H:%M:%S")
if args.display_pokestop and (expire_time != 0 or not args.onlylure):
pokestops[Fort.FortId] = [Fort.Latitude,
Fort.Longitude, expire_time]
except AttributeError:
break
for poke in visible:
pokeid = str(poke.pokemon.PokemonId)
pokename = pokemonsJSON[pokeid]
pokename_icon = pokemonsJSON_icons[pokeid]
disappear_timestamp = time.time() + poke.TimeTillHiddenMs / 1000
if args.ignore:
if pokename.lower() in ignore or pokeid in ignore:
continue
elif args.only:
if pokename.lower() not in only and pokeid not in only:
continue
if poke.SpawnPointId in spotted_pokemon.keys():
if spotted_pokemon[poke.SpawnPointId]['disappear_datetime'] > datetime.now():
continue
if poke.TimeTillHiddenMs < 0:
continue
if args.china:
(poke.Latitude, poke.Longitude) = \
transform_from_wgs_to_gcj(Location(poke.Latitude,
poke.Longitude))
disappear_datetime = datetime.fromtimestamp(disappear_timestamp)
distance = lonlat_to_meters(origin_lat, origin_lon, poke.Latitude, poke.Longitude)
if distance < max_distance:
time_till_disappears = disappear_datetime - datetime.now()
disappear_hours, disappear_remainder = divmod(time_till_disappears.seconds, 3600)
disappear_minutes, disappear_seconds = divmod(disappear_remainder, 60)
disappear_minutes = str(disappear_minutes)
disappear_seconds = str(disappear_seconds)
if len(disappear_seconds) == 1:
disappear_seconds = str(0) + disappear_seconds
disappear_time = disappear_datetime.strftime(translationsJSON['time_format'])
# calculate direction of Pokemon in bearing degrees
direction = bearing_degrees(origin_lat, origin_lon, poke.Latitude, poke.Longitude)
# transform in compass direction
direction = bearing_degrees_to_compass_direction(direction)
alert_text = strtr(translationsJSON['spotted_pokemon'], {
'#{pokename}': pokename,
'#{latitude}': str(poke.Latitude),
'#{longitude}': str(poke.Longitude),
'#{distance}': "{0:.2f}".format(distance),
'#{direction}': translationsJSON['directions'][direction],
'#{disappear_time}': disappear_time,
'#{disappear_minutes}': disappear_minutes,
'#{disappear_seconds}': disappear_seconds,
'#{host}': args.host,
'#{port}': str(args.port)})
if pokemon_icons_prefix != ':pokeball:':
user_icon = pokemon_icons_prefix + pokename_icon.lower() + ':'
else:
user_icon = ':pokeball:'
try:
if poke.lured:
send_to_slack(alert_text, pokename + ' (lured)', user_icon, slack_webhook_urlpath)
else:
send_to_slack(alert_text, pokename, user_icon, slack_webhook_urlpath)
        except AttributeError:
            # wild spawns carry no 'lured' attribute; only the lured pokestop
            # entries (built above as Namespace objects) do
            send_to_slack(alert_text, pokename, user_icon, slack_webhook_urlpath)
spotted_pokemon[poke.SpawnPointId] = {'disappear_datetime': disappear_datetime, 'pokename': pokename}
# print(r.status_code, r.reason)
global api_last_response
api_last_response = datetime.now()
pokemons[poke.SpawnPointId] = {
"lat": poke.Latitude,
"lng": poke.Longitude,
"disappear_time": disappear_timestamp,
"id": poke.pokemon.PokemonId,
"name": pokename
}
def clear_stale_pokemons():
current_time = time.time()
for pokemon_key in pokemons.keys():
pokemon = pokemons[pokemon_key]
if current_time > pokemon['disappear_time']:
print "[+] removing stale pokemon %s at %f, %f from list" % (
pokemon['name'].encode('utf-8'), pokemon['lat'], pokemon['lng'])
del pokemons[pokemon_key]
def register_background_thread(initial_registration=False):
"""
Start a background thread to search for Pokemon
while Flask is still able to serve requests for the map
:param initial_registration: True if first registration and thread should start immediately, False if it's being called by the finishing thread to schedule a refresh
:return: None
"""
debug('register_background_thread called')
global search_thread
if initial_registration:
if not werkzeug.serving.is_running_from_reloader():
debug(
'register_background_thread: not running inside Flask so not starting thread')
return
if search_thread:
debug(
'register_background_thread: initial registration requested but thread already running')
return
debug('register_background_thread: initial registration')
search_thread = threading.Thread(target=main)
else:
debug('register_background_thread: queueing')
search_thread = threading.Timer(30, main) # delay, in seconds
search_thread.daemon = True
search_thread.name = 'search_thread'
search_thread.start()
def create_app():
app = Flask(__name__, template_folder='templates')
GoogleMaps(app, key=GOOGLEMAPS_KEY)
return app
app = create_app()
@app.route('/data')
def data():
""" Gets all the PokeMarkers via REST """
return json.dumps(get_pokemarkers())
@app.route('/raw_data')
def raw_data():
""" Gets raw data for pokemons/gyms/pokestops via REST """
return flask.jsonify(pokemons=pokemons, gyms=gyms, pokestops=pokestops)
@app.route('/config')
def config():
""" Gets the settings for the Google Maps via REST"""
center = {
'lat': FLOAT_LAT,
'lng': FLOAT_LONG,
'zoom': 15,
'identifier': "fullmap"
}
return json.dumps(center)
@app.route('/')
def fullmap():
clear_stale_pokemons()
return render_template(
'example_fullmap.html', key=GOOGLEMAPS_KEY, fullmap=get_map(), auto_refresh=auto_refresh)
@app.route('/next_loc')
def next_loc():
global NEXT_LAT, NEXT_LONG
lat = flask.request.args.get('lat', '')
lon = flask.request.args.get('lon', '')
if not (lat and lon):
print('[-] Invalid next location: %s,%s' % (lat, lon))
else:
print('[+] Saved next location as %s,%s' % (lat, lon))
NEXT_LAT = float(lat)
NEXT_LONG = float(lon)
return 'ok'
def get_pokemarkers():
pokeMarkers = [{
'icon': icons.dots.red,
'lat': origin_lat,
'lng': origin_lon,
'infobox': "Start position",
'type': 'custom',
'key': 'start-position',
'disappear_time': -1
}]
for pokemon_key in pokemons:
pokemon = pokemons[pokemon_key]
datestr = datetime.fromtimestamp(pokemon[
'disappear_time'])
dateoutput = datestr.strftime("%H:%M:%S")
if is_ampm_clock:
dateoutput = datestr.strftime("%I:%M%p").lstrip('0')
pokemon['disappear_time_formatted'] = dateoutput
LABEL_TMPL = u'''
<div><b>{name}</b><span> - </span><small><a href='http://www.pokemon.com/us/pokedex/{id}' target='_blank' title='View in Pokedex'>#{id}</a></small></div>
<div>Disappears at - {disappear_time_formatted} <span class='label-countdown' disappears-at='{disappear_time}'></span></div>
<div><a href='https://www.google.com/maps/dir/Current+Location/{lat},{lng}' target='_blank' title='View in Maps'>Get Directions</a></div>
'''
label = LABEL_TMPL.format(**pokemon)
# NOTE: `infobox` field doesn't render multiple line string in frontend
label = label.replace('\n', '')
pokeMarkers.append({
'type': 'pokemon',
'key': pokemon_key,
'disappear_time': pokemon['disappear_time'],
'icon': 'static/icons/%d.png' % pokemon["id"],
'lat': pokemon["lat"],
'lng': pokemon["lng"],
'infobox': label
})
for gym_key in gyms:
gym = gyms[gym_key]
if gym[0] == 0:
color = "rgba(0,0,0,.4)"
if gym[0] == 1:
color = "rgba(74, 138, 202, .6)"
if gym[0] == 2:
color = "rgba(240, 68, 58, .6)"
if gym[0] == 3:
color = "rgba(254, 217, 40, .6)"
icon = 'static/forts/'+numbertoteam[gym[0]]+'_large.png'
pokeMarkers.append({
'icon': 'static/forts/' + numbertoteam[gym[0]] + '.png',
'type': 'gym',
'key': gym_key,
'disappear_time': -1,
'lat': gym[1],
'lng': gym[2],
'infobox': "<div><center><small>Gym owned by:</small><br><b style='color:" + color + "'>Team " + numbertoteam[gym[0]] + "</b><br><img id='" + numbertoteam[gym[0]] + "' height='100px' src='"+icon+"'><br>Prestige: " + str(gym[3]) + "</center>"
})
for stop_key in pokestops:
stop = pokestops[stop_key]
if stop[2] > 0:
pokeMarkers.append({
'type': 'lured_stop',
'key': stop_key,
'disappear_time': -1,
'icon': 'static/forts/PstopLured.png',
'lat': stop[0],
'lng': stop[1],
'infobox': 'Lured Pokestop, expires at ' + stop[2],
})
else:
pokeMarkers.append({
'type': 'stop',
'key': stop_key,
'disappear_time': -1,
'icon': 'static/forts/Pstop.png',
'lat': stop[0],
'lng': stop[1],
'infobox': 'Pokestop',
})
return pokeMarkers
def get_map():
fullmap = Map(
identifier="fullmap2",
style='height:100%;width:100%;top:0;left:0;position:absolute;z-index:200;',
lat=origin_lat,
lng=origin_lon,
markers=get_pokemarkers(),
zoom='17', )
return fullmap
if __name__ == '__main__':
args = get_args()
register_background_thread(initial_registration=True)
app.run(debug=True, threaded=True, host=args.host, port=args.port)
| rubenmak/PokemonGo-SlackBot | pokeslack.py | Python | mit | 41,898 |
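The Flask routes in pokeslack.py above expose the scanner state as JSON over HTTP. A minimal client sketch, assuming the app is running locally on port 5000 (the host, port, and coordinates are placeholders, not part of the source):

import json
import urllib.request

BASE = 'http://127.0.0.1:5000'  # assumed address of the running app

# /data returns the full marker list produced by get_pokemarkers()
with urllib.request.urlopen(BASE + '/data') as resp:
    markers = json.loads(resp.read().decode('utf-8'))
print('%d markers' % len(markers))

# /next_loc moves the scanner; note it answers 'ok' even for bad input
with urllib.request.urlopen(BASE + '/next_loc?lat=40.71&lon=-74.00') as resp:
    print(resp.read())  # b'ok'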
from ..base import HaravanResource
from haravan import mixins
class Customer(HaravanResource, mixins.Metafields):
@classmethod
def search(cls, **kwargs):
"""
Search for customers matching supplied query
Args:
q: Text to search for customers ("q" is short for query)
            f: Filters to apply to customers ("f" is short for filter)
page: Page to show (default: 1)
limit: Maximum number of results to show (default: 50, maximum: 250)
Returns:
An array of customers.
"""
return cls._build_list(cls.get("search", **kwargs))
| Haravan/haravan_python_api | haravan/resources/customer.py | Python | mit | 628 |
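A sketch of calling the search helper above once a session is active; the site bootstrap is an assumption modeled on the Shopify client this library derives from, and the shop URL is a placeholder:

import haravan
from haravan.resources.customer import Customer

# Assumed session setup -- adjust to however your deployment authenticates.
haravan.HaravanResource.set_site('https://example.myharavan.com/admin')

# 'q' is the free-text query, 'limit' caps results (max 250 per the docstring).
for customer in Customer.search(q='john', limit=50):
    print(customer.id)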
# -*- coding: utf-8 -*-
"""Parse Archimate XML Exchange File Format into a MongoDB DB""" | RafaAguilar/archi2mongodb | archimate2mongodb/pkg/utils/__init__.py | Python | mit | 88 |
# -*- coding: utf-8 -*-
# Generated by Django 1.10.1 on 2016-10-19 08:54
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('core', '0001_initial'),
]
operations = [
migrations.AlterField(
model_name='url',
name='counter',
            field=models.PositiveIntegerField(default=0, editable=False, verbose_name='Number of clicks'),
),
]
| tmnkv/shrt | shrt/core/migrations/0002_auto_20161019_1154.py | Python | mit | 514 |
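The AlterField above only touches options with no schema impact (default, editable, verbose_name), so applying it is effectively a state-only change. A sketch of running it programmatically, assuming a configured settings module:

import django
from django.core.management import call_command

# Assumes DJANGO_SETTINGS_MODULE points at the shrt project settings.
django.setup()
call_command('migrate', 'core', '0002_auto_20161019_1154')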
#!/usr/bin/env python3
import argparse
import json
from . import networks
from .models import Serializable
parser = argparse.ArgumentParser(prog='choo')
parser.add_argument('--pretty', action='store_true', help='pretty-print output JSON')
parser.add_argument('network', metavar='network', help='network to use, e.g. vrr', choices=networks.__all__)
parser.add_argument('query', help='any Searchable or Searchable.Request as JSON')
args = parser.parse_args()
network = getattr(networks, args.network)
result = Serializable.serialize(network.query(Serializable.unserialize(json.loads(args.query))))
if args.pretty:
print(json.dumps(result, indent=2, separators=(',', ': ')))
else:
print(json.dumps(result, separators=(',', ':')))
| NoMoKeTo/transit | src/choo/cli.py | Python | apache-2.0 | 738 |
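The two json.dumps calls above differ only in their separators tuples; a standalone illustration of the resulting output:

import json

result = {'type': 'stop', 'name': 'Essen Hbf'}
print(json.dumps(result, separators=(',', ':')))
# {"type":"stop","name":"Essen Hbf"}
print(json.dumps(result, indent=2, separators=(',', ': ')))
# {
#   "type": "stop",
#   "name": "Essen Hbf"
# }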
import hashlib
from twisted.internet.defer import inlineCallbacks
from Tribler.Test.Community.AbstractTestCommunity import AbstractTestCommunity
from Tribler.Test.Core.base_test import MockObject
from Tribler.community.market.community import MarketCommunity, ProposedTradeRequestCache
from Tribler.community.market.core.message import TraderId, MessageId, MessageNumber
from Tribler.community.market.core.order import OrderId, OrderNumber, Order
from Tribler.community.market.core.price import Price
from Tribler.community.market.core.quantity import Quantity
from Tribler.community.market.core.tick import Ask, Bid, Tick, EMPTY_SIG
from Tribler.community.market.core.timeout import Timeout
from Tribler.community.market.core.timestamp import Timestamp
from Tribler.community.market.core.trade import Trade, CounterTrade
from Tribler.community.market.core.ttl import Ttl
from Tribler.community.market.wallet.dummy_wallet import DummyWallet1, DummyWallet2
from Tribler.dispersy.candidate import Candidate, WalkCandidate
from Tribler.dispersy.crypto import ECCrypto
from Tribler.dispersy.member import Member
from Tribler.dispersy.message import DelayMessageByProof, Message, DropMessage
from Tribler.dispersy.util import blocking_call_on_reactor_thread
class CommunityTestSuite(AbstractTestCommunity):
"""Community test cases."""
@blocking_call_on_reactor_thread
@inlineCallbacks
def setUp(self, annotate=True):
yield super(CommunityTestSuite, self).setUp(annotate=annotate)
dummy1_wallet = DummyWallet1()
dummy2_wallet = DummyWallet2()
self.market_community = MarketCommunity(self.dispersy, self.master_member, self.member)
self.market_community.initialize(wallets={dummy1_wallet.get_identifier(): dummy1_wallet,
dummy2_wallet.get_identifier(): dummy2_wallet}, use_database=False)
self.market_community.use_local_address = True
self.dispersy._lan_address = ("127.0.0.1", 1234)
self.dispersy._endpoint.open(self.dispersy)
self.dispersy.attach_community(self.market_community)
eccrypto = ECCrypto()
ec = eccrypto.generate_key(u"curve25519")
member = Member(self.dispersy, ec, 1)
trader_id = hashlib.sha1(member.public_key).digest().encode('hex')
self.ask = Ask(MessageId(TraderId('0'), MessageNumber('message_number')),
OrderId(TraderId(trader_id), OrderNumber(1234)), Price(63400, 'DUM1'), Quantity(30, 'DUM2'),
Timeout(3600), Timestamp.now())
self.ask.sign(member)
self.bid = Bid(MessageId(TraderId('1'), MessageNumber('message_number')),
OrderId(TraderId(trader_id), OrderNumber(1235)), Price(343, 'DUM1'), Quantity(22, 'DUM2'),
Timeout(3600), Timestamp.now())
self.bid.sign(member)
self.order = Order(OrderId(TraderId(self.market_community.mid), OrderNumber(24)), Price(20, 'DUM1'),
Quantity(30, 'DUM2'), Timeout(3600.0), Timestamp.now(), False)
self.proposed_trade = Trade.propose(MessageId(TraderId('0'), MessageNumber('message_number')),
OrderId(TraderId('0'), OrderNumber(23)),
OrderId(TraderId(self.market_community.mid), OrderNumber(24)),
Price(20, 'DUM1'), Quantity(30, 'DUM2'), Timestamp.now())
@blocking_call_on_reactor_thread
def test_get_master_members(self):
"""
Test retrieval of the master members of the Market community
"""
self.assertTrue(MarketCommunity.get_master_members(self.dispersy))
@blocking_call_on_reactor_thread
def test_proposed_trade_cache_timeout(self):
"""
Test the timeout method of a proposed trade request in the cache
"""
ask = Ask(MessageId(TraderId('0'), MessageNumber('message_number')),
OrderId(TraderId(self.market_community.mid), OrderNumber(24)),
Price(63400, 'DUM1'), Quantity(30, 'DUM2'), Timeout(3600), Timestamp.now())
order = Order(OrderId(TraderId("0"), OrderNumber(23)), Price(20, 'DUM1'), Quantity(30, 'DUM2'),
Timeout(3600.0), Timestamp.now(), False)
self.market_community.order_book.insert_ask(ask)
self.assertEqual(len(self.market_community.order_book.asks), 1)
self.market_community.order_manager.order_repository.add(order)
order.reserve_quantity_for_tick(self.proposed_trade.recipient_order_id, Quantity(30, 'DUM2'))
self.market_community.order_manager.order_repository.update(order)
cache = ProposedTradeRequestCache(self.market_community, self.proposed_trade)
cache.on_timeout()
self.assertEqual(len(self.market_community.order_book.asks), 0)
def get_tick_message(self, tick):
meta = self.market_community.get_meta_message(u"ask" if isinstance(tick, Ask) else u"bid")
return meta.impl(
authentication=(self.market_community.my_member,),
distribution=(self.market_community.claim_global_time(),),
payload=tick.to_network() + (Ttl.default(), "127.0.0.1", 1234)
)
def get_offer_sync(self, tick):
meta = self.market_community.get_meta_message(u"offer-sync")
candidate = Candidate(self.market_community.lookup_ip(TraderId(self.market_community.mid)), False)
return meta.impl(
authentication=(self.market_community.my_member,),
distribution=(self.market_community.claim_global_time(),),
destination=(candidate,),
payload=tick.to_network() + (Ttl(1),) + ("127.0.0.1", 1234) + (isinstance(tick, Ask),)
)
@blocking_call_on_reactor_thread
def test_verify_offer_creation(self):
"""
Test creation of an offer in the community
"""
self.assertRaises(RuntimeError, self.market_community.verify_offer_creation,
Price(3, 'MC'), 'ABC', Quantity(4, 'BTC'), 'ABC')
self.assertRaises(RuntimeError, self.market_community.verify_offer_creation,
Price(3, 'MC'), 'ABC', Quantity(4, 'BTC'), 'MC')
self.assertRaises(RuntimeError, self.market_community.verify_offer_creation,
Price(1, 'DUM1'), 'DUM1', Quantity(1, 'BTC'), 'BTC')
self.assertRaises(RuntimeError, self.market_community.verify_offer_creation,
Price(0.1, 'DUM1'), 'DUM1', Quantity(1, 'DUM2'), 'DUM2')
self.assertRaises(RuntimeError, self.market_community.verify_offer_creation,
Price(1, 'DUM1'), 'DUM1', Quantity(0.1, 'DUM2'), 'DUM2')
@blocking_call_on_reactor_thread
def test_check_message(self):
"""
Test the general check of the validity of a message in the market community
"""
self.market_community.update_ip(TraderId(self.market_community.mid), ('2.2.2.2', 2))
proposed_trade_msg = self.get_proposed_trade_msg()
self.market_community.timeline.check = lambda _: (True, None)
[self.assertIsInstance(msg, Message.Implementation)
for msg in self.market_community.check_message([proposed_trade_msg])]
self.market_community.timeline.check = lambda _: (False, None)
[self.assertIsInstance(msg, DelayMessageByProof)
for msg in self.market_community.check_message([proposed_trade_msg])]
@blocking_call_on_reactor_thread
def test_check_tick_message(self):
"""
Test the general check of the validity of a tick message in the market community
"""
self.ask._signature = EMPTY_SIG
[self.assertIsInstance(msg, DropMessage) for msg in
self.market_community.check_tick_message([self.get_tick_message(self.ask)])]
self.market_community.timeline.check = lambda _: (False, None)
[self.assertIsInstance(msg, DelayMessageByProof) for msg in
self.market_community.check_tick_message([self.get_tick_message(self.ask)])]
self.market_community.timeline.check = lambda _: (True, None)
self.ask.order_id._trader_id = TraderId(self.market_community.mid)
[self.assertIsInstance(msg, DropMessage) for msg in
self.market_community.check_tick_message([self.get_tick_message(self.ask)])]
@blocking_call_on_reactor_thread
def test_check_trade_message(self):
"""
Test the general check of the validity of a trade message in the market community
"""
self.proposed_trade.recipient_order_id._trader_id = TraderId("abcdef")
self.market_community.update_ip(TraderId(self.market_community.mid), ('2.2.2.2', 2))
self.market_community.update_ip(TraderId("abcdef"), ('2.2.2.2', 2))
self.market_community.timeline.check = lambda _: (False, None)
[self.assertIsInstance(msg, DelayMessageByProof) for msg in
self.market_community.check_trade_message([self.get_proposed_trade_msg()])]
self.market_community.timeline.check = lambda _: (True, None)
[self.assertIsInstance(msg, DropMessage) for msg in
self.market_community.check_trade_message([self.get_proposed_trade_msg()])]
self.proposed_trade.recipient_order_id._trader_id = TraderId(self.market_community.mid)
self.market_community.timeline.check = lambda _: (True, None)
[self.assertIsInstance(msg, DropMessage) for msg in
self.market_community.check_trade_message([self.get_proposed_trade_msg()])]
self.market_community.order_manager.order_repository.add(self.order)
self.market_community.timeline.check = lambda _: (True, None)
[self.assertIsInstance(msg, Message.Implementation) for msg in
self.market_community.check_trade_message([self.get_proposed_trade_msg()])]
@blocking_call_on_reactor_thread
def test_send_offer_sync(self):
"""
Test sending an offer sync
"""
self.market_community.update_ip(TraderId('0'), ("127.0.0.1", 1234))
self.market_community.update_ip(TraderId('1'), ("127.0.0.1", 1234))
self.market_community.update_ip(self.ask.order_id.trader_id, ("127.0.0.1", 1234))
candidate = WalkCandidate(("127.0.0.1", 1234), False, ("127.0.0.1", 1234), ("127.0.0.1", 1234), u"public")
self.assertTrue(self.market_community.send_offer_sync(candidate, self.ask))
@blocking_call_on_reactor_thread
def test_send_proposed_trade(self):
"""
Test sending a proposed trade
"""
self.market_community.update_ip(TraderId(self.market_community.mid), ('127.0.0.1', 1234))
self.assertEqual(self.market_community.send_proposed_trade_messages([self.proposed_trade]), [True])
@blocking_call_on_reactor_thread
def test_send_counter_trade(self):
"""
Test sending a counter trade
"""
self.market_community.update_ip(TraderId('b'), ('127.0.0.1', 1234))
counter_trade = CounterTrade(MessageId(TraderId('a'), MessageNumber('2')), self.order.order_id,
OrderId(TraderId('b'), OrderNumber(3)), 1235, Price(3, 'MC'), Quantity(4, 'BTC'),
Timestamp.now())
self.market_community.send_counter_trade(counter_trade)
@blocking_call_on_reactor_thread
def test_start_transaction(self):
"""
Test the start transaction method
"""
self.market_community.order_manager.order_repository.add(self.order)
self.market_community.update_ip(TraderId('0'), ("127.0.0.1", 1234))
self.market_community.start_transaction(self.proposed_trade)
self.assertEqual(len(self.market_community.transaction_manager.find_all()), 1)
@blocking_call_on_reactor_thread
def test_create_intro_request(self):
"""
Test the creation of an introduction request
"""
self.market_community.order_book.insert_ask(self.ask)
self.market_community.order_book.insert_bid(self.bid)
candidate = WalkCandidate(("127.0.0.1", 1234), False, ("127.0.0.1", 1234), ("127.0.0.1", 1234), u"public")
request = self.market_community.create_introduction_request(candidate, True)
self.assertTrue(request)
self.assertTrue(request.payload.orders_bloom_filter)
@blocking_call_on_reactor_thread
def test_on_introduction_request(self):
"""
Test that when we receive an intro request with a orders bloom filter, we send an order sync back
"""
def on_send_offer_sync(_, tick):
self.assertIsInstance(tick, Tick)
on_send_offer_sync.called = True
on_send_offer_sync.called = False
candidate = WalkCandidate(("127.0.0.1", 1234), False, ("127.0.0.1", 1234), ("127.0.0.1", 1234), u"public")
candidate.associate(self.market_community.my_member)
payload = self.market_community.create_introduction_request(candidate, True).payload
self.market_community.order_book.insert_ask(self.ask)
self.market_community.order_book.insert_bid(self.bid)
self.market_community.update_ip(TraderId('0'), ("127.0.0.1", 1234))
self.market_community.update_ip(TraderId('1'), ("127.0.0.1", 1234))
self.market_community.send_offer_sync = on_send_offer_sync
message = MockObject()
message.payload = payload
message.candidate = candidate
self.market_community.on_introduction_request([message])
self.assertTrue(on_send_offer_sync.called)
@blocking_call_on_reactor_thread
def test_lookup_ip(self):
# Test for lookup ip
self.market_community.update_ip(TraderId('0'), ("1.1.1.1", 0))
self.assertEquals(("1.1.1.1", 0), self.market_community.lookup_ip(TraderId('0')))
@blocking_call_on_reactor_thread
def test_get_wallet_address(self):
"""
Test the retrieval of a wallet address
"""
self.assertRaises(ValueError, self.market_community.get_wallet_address, 'ABCD')
self.assertTrue(self.market_community.get_wallet_address('DUM1'))
@blocking_call_on_reactor_thread
def test_create_ask(self):
# Test for create ask
self.assertRaises(RuntimeError, self.market_community.create_ask, 20, 'DUM2', 100, 'DUM2', 0.0)
self.assertRaises(RuntimeError, self.market_community.create_ask, 20, 'NOTEXIST', 100, 'DUM2', 0.0)
self.assertRaises(RuntimeError, self.market_community.create_ask, 20, 'DUM2', 100, 'NOTEXIST', 0.0)
self.assertTrue(self.market_community.create_ask(20, 'DUM1', 100, 'DUM2', 3600))
self.assertEquals(1, len(self.market_community.order_book._asks))
self.assertEquals(0, len(self.market_community.order_book._bids))
@blocking_call_on_reactor_thread
def test_on_tick(self):
"""
Test whether a tick is inserted in the order book when we receive one
"""
self.market_community.on_tick([self.get_tick_message(self.ask), self.get_tick_message(self.bid)])
self.assertEquals(1, len(self.market_community.order_book.asks))
self.assertEquals(1, len(self.market_community.order_book.bids))
# Update the timestamp of the ticks
ask_timestamp = float(self.ask.timestamp)
self.ask.update_timestamp()
self.market_community.on_tick([self.get_tick_message(self.ask)])
self.assertEquals(1, len(self.market_community.order_book.asks))
new_timestamp = self.market_community.order_book.get_tick(self.ask.order_id).tick.timestamp
self.assertGreater(new_timestamp, ask_timestamp)
@blocking_call_on_reactor_thread
def test_create_bid(self):
# Test for create bid
self.assertRaises(RuntimeError, self.market_community.create_bid, 20, 'DUM2', 100, 'DUM2', 0.0)
self.assertRaises(RuntimeError, self.market_community.create_bid, 20, 'NOTEXIST', 100, 'DUM2', 0.0)
self.assertRaises(RuntimeError, self.market_community.create_bid, 20, 'DUM2', 100, 'NOTEXIST', 0.0)
self.assertTrue(self.market_community.create_bid(20, 'DUM1', 100, 'DUM2', 3600))
self.assertEquals(0, len(self.market_community.order_book.asks))
self.assertEquals(1, len(self.market_community.order_book.bids))
def get_proposed_trade_msg(self):
destination, payload = self.proposed_trade.to_network()
payload += ("127.0.0.1", 1234)
candidate = Candidate(self.market_community.lookup_ip(destination), False)
meta = self.market_community.get_meta_message(u"proposed-trade")
message = meta.impl(
authentication=(self.market_community.my_member,),
distribution=(self.market_community.claim_global_time(),),
destination=(candidate,),
payload=payload
)
return message
@blocking_call_on_reactor_thread
def test_on_proposed_trade_accept(self):
"""
Test whether we accept a trade when we receive a correct proposed trade message
"""
def mocked_start_transaction(*_):
mocked_start_transaction.called = True
mocked_start_transaction.called = False
self.market_community.update_ip(TraderId(self.market_community.mid), ('2.2.2.2', 2))
self.market_community.start_transaction = mocked_start_transaction
self.market_community.order_manager.order_repository.add(self.order)
self.market_community.on_proposed_trade([self.get_proposed_trade_msg()])
self.assertTrue(mocked_start_transaction.called)
@blocking_call_on_reactor_thread
def test_on_proposed_trade_decline(self):
"""
Test whether we decline a trade when we receive an invalid proposed trade message
"""
def mocked_send_decline_trade(*_):
mocked_send_decline_trade.called = True
mocked_send_decline_trade.called = False
self.market_community.update_ip(TraderId(self.market_community.mid), ('2.2.2.2', 2))
self.market_community.send_declined_trade = mocked_send_decline_trade
self.market_community.order_manager.order_repository.add(self.order)
self.proposed_trade._price = Price(900, 'DUM1')
self.market_community.on_proposed_trade([self.get_proposed_trade_msg()])
self.assertTrue(mocked_send_decline_trade.called)
@blocking_call_on_reactor_thread
def test_on_proposed_trade_counter(self):
"""
Test whether we send a counter trade when we receive a proposed trade message
"""
def mocked_send_counter_trade(*_):
mocked_send_counter_trade.called = True
mocked_send_counter_trade.called = False
self.market_community.update_ip(TraderId(self.market_community.mid), ('2.2.2.2', 2))
self.market_community.send_counter_trade = mocked_send_counter_trade
self.market_community.order_manager.order_repository.add(self.order)
self.proposed_trade._quantity = Quantity(100000, 'DUM2')
self.market_community.on_proposed_trade([self.get_proposed_trade_msg()])
self.assertTrue(mocked_send_counter_trade.called)
@blocking_call_on_reactor_thread
def test_on_offer_sync(self):
"""
Test whether the right operations happen when we receive an offer sync
"""
self.assertEqual(len(self.market_community.order_book.asks), 0)
self.assertEqual(len(self.market_community.order_book.bids), 0)
self.market_community.update_ip(TraderId(self.market_community.mid), ('2.2.2.2', 2))
self.market_community.on_offer_sync([self.get_offer_sync(self.ask)])
self.assertEqual(len(self.market_community.order_book.asks), 1)
self.market_community.order_book.remove_tick(self.ask.order_id)
self.market_community.on_offer_sync([self.get_offer_sync(self.bid)])
self.assertEqual(len(self.market_community.order_book.bids), 1)
@blocking_call_on_reactor_thread
def test_compute_reputation(self):
"""
Test the compute_reputation method
"""
self.market_community.tradechain_community = MockObject()
self.market_community.tradechain_community.persistence = MockObject()
self.market_community.tradechain_community.persistence.get_all_blocks = lambda: []
self.market_community.compute_reputation()
self.assertFalse(self.market_community.reputation_dict)
@blocking_call_on_reactor_thread
def test_abort_transaction(self):
"""
Test aborting a transaction
"""
self.order.reserve_quantity_for_tick(OrderId(TraderId('0'), OrderNumber(23)), Quantity(30, 'DUM2'))
self.market_community.order_manager.order_repository.add(self.order)
self.market_community.update_ip(TraderId('0'), ("127.0.0.1", 1234))
self.market_community.start_transaction(self.proposed_trade)
transaction = self.market_community.transaction_manager.find_all()[0]
self.assertTrue(transaction)
self.assertEqual(self.order.reserved_quantity, Quantity(30, 'DUM2'))
self.market_community.abort_transaction(transaction)
order = self.market_community.order_manager.order_repository.find_by_id(transaction.order_id)
self.assertEqual(order.reserved_quantity, Quantity(0, 'DUM2'))
| vandenheuvel/tribler | Tribler/Test/Community/Market/test_community.py | Python | lgpl-3.0 | 21,400 |
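Several of the tests above stub out a method with a bare function and record the call on a function attribute instead of using a mocking library. The pattern in isolation (class and method names here are illustrative):

class Community(object):
    def send_counter_trade(self, trade):
        raise NotImplementedError  # would hit the network in production

def mocked_send_counter_trade(*_):
    mocked_send_counter_trade.called = True
mocked_send_counter_trade.called = False

community = Community()
community.send_counter_trade = mocked_send_counter_trade  # monkey-patch
community.send_counter_trade('dummy-trade')
assert mocked_send_counter_trade.called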
######################################################################
#
# FSP3000R7AmplifierMib modeler plugin
#
# Copyright (C) 2011 Russell Dwarshuis, Merit Network, Inc.
#
# This program can be used under the GNU General Public License version 2
# You can find full information here: http://www.zenoss.com/oss
#
######################################################################
__doc__ = """FSP3000R7AmplifierMib
FSP3000R7AmplifierMib maps EDFA and RAMAN amplifiers on a FSP3000R7 system
"""
from ZenPacks.Merit.AdvaFSP3000R7.lib.FSP3000R7MibCommon import FSP3000R7MibCommon
class FSP3000R7AmplifierMib(FSP3000R7MibCommon):
modname = "ZenPacks.Merit.AdvaFSP3000R7.FSP3000R7Amplifier"
relname = "FSP3000R7Amp"
# ADVA amplifier cards that will be detected by this modeler
# They must respond to the same performance monitoring MIBs
componentModels = [ 'EDFA-C-S10',
'EDFA-C-S18-GCB',
'EDFA-C-S18-GC',
'EDFA-C-S20-GCB',
'EDFA-C-D20-VGC',
'EDFA-C-D20-GC',
'EDFA-C-D17-GC',
'EDFA-L-D17-GC',
'EDFA-C-S20-GCB-DM',
'EDFA-C-D20-VGC-DM',
'EDFA-C-D20-VLGC-DM',
'EDFA-C-D27-GCB-DM',
'EDFA-C-S26-VGC-DM',
'EDFA-C-S26-VGCB-DM',
'AMP-S20H-C15',
'AMP-S20L-C15' ]
| kb8u/ZenPacks.Merit.AdvaFSP3000R7 | ZenPacks/Merit/AdvaFSP3000R7/modeler/plugins/Adva/FSP3000R7AmplifierMib.py | Python | gpl-2.0 | 1,541 |
from _Framework.Control import PlayableControl
class PadControl(PlayableControl):
class State(PlayableControl.State):
def __init__(self, control = None, manager = None, *a, **k):
super(PadControl.State, self).__init__(control, manager, *a, **k)
self._sensitivity_profile = None
def _get_sensitivity_profile(self):
return self._sensitivity_profile
def _set_sensitivity_profile(self, value):
self._sensitivity_profile = value
self._update_sensitivity()
sensitivity_profile = property(_get_sensitivity_profile, _set_sensitivity_profile)
def set_control_element(self, control_element):
super(PadControl.State, self).set_control_element(control_element)
self._update_sensitivity()
def _update_sensitivity(self):
if self._control_element and self._sensitivity_profile:
self._control_element.sensitivity_profile = self._sensitivity_profile
def __init__(self, *a, **k):
super(PadControl, self).__init__(*a, **k) | LividInstruments/LiveRemoteScripts | _Mono_Framework/_deprecated/PadControl.py | Python | mit | 1,089 |
from model.project import Project
import random
import string
def random_string(prefix, maxlen):
symbols = string.ascii_letters + string.digits + " "*5
return prefix + "".join([random.choice(symbols) for i in range(random.randrange(maxlen))])
'''def test_delete_project(app):
if app.project.count() == 0:
project = Project(name=random_string("name", 10), description=random_string("description", 20))
app.project.create(project)
old_projects = app.project.get_project_list()
project = random.choice(old_projects)
app.project.delete(project.id)
new_projects = app.project.get_project_list()
assert len(old_projects) - 1 == len(new_projects)
old_projects.remove(project)
assert sorted(old_projects, key=Project.id_or_max) == sorted(new_projects, key=Project.id_or_max)'''
def test_delete_project(app):
username = "administrator"
password = "root"
if app.project.count() == 0:
app.project.create()
old_projects = app.soap.soap_project_list(username, password)
project = random.choice(old_projects)
app.project.delete_project_by_id(project.id)
new_projects = app.soap.soap_project_list(username, password)
assert len(old_projects) - 1 == len(new_projects)
old_projects.remove(project)
assert sorted(old_projects, key=Project.id_or_max) == sorted(new_projects, key=Project.id_or_max) | Spike96/Pyhton_Training_Mantis | test/test_delete_project.py | Python | apache-2.0 | 1,388 |
"""yeelight conftest."""
from tests.components.light.conftest import mock_light_profiles # noqa
| sdague/home-assistant | tests/components/yeelight/conftest.py | Python | apache-2.0 | 97 |
from django.conf import settings
from django.core.urlresolvers import resolve
from .. import urls_tips
RAW_SUBDOMAIN_HOSTS = ['tips.pinecast.com', 'tips.pinecast.dev']
if settings.STAGING:
RAW_SUBDOMAIN_HOSTS.append('tips.next.pinecast.com')
class TipsSubdomainMiddleware(object):
def process_request(self, req):
domain = req.META.get('HTTP_HOST') or req.META.get('SERVER_NAME')
if settings.DEBUG and ':' in domain:
domain = domain[:domain.index(':')]
if domain not in RAW_SUBDOMAIN_HOSTS:
return None
path = req.get_full_path()
path_to_resolve = path if '?' not in path else path[:path.index('?')]
func, args, kwargs = resolve(path_to_resolve, urls_tips)
req.META['tips_site_hostname'] = True
return func(req, *args, **kwargs)
| Pinecast/pinecast | payments/middleware/tips_site.py | Python | apache-2.0 | 830 |
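TipsSubdomainMiddleware is an old-style Django middleware: a plain class whose process_request short-circuits the request by returning a response itself. A sketch of registering it, assuming the pre-1.10 MIDDLEWARE_CLASSES setting this style targets:

# settings.py (sketch; only the relevant entries shown)
MIDDLEWARE_CLASSES = [
    'django.middleware.common.CommonMiddleware',
    # ...
    'payments.middleware.tips_site.TipsSubdomainMiddleware',
]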
from .main import Sabnzbd
def start():
return Sabnzbd()
config = [{
'name': 'sabnzbd',
'groups': [
{
'tab': 'downloaders',
'list': 'download_providers',
'name': 'sabnzbd',
'label': 'Sabnzbd',
'description': 'Use <a href="http://sabnzbd.org/" target="_blank">SABnzbd</a> (0.7+) to download NZBs.',
'wizard': True,
'options': [
{
'name': 'enabled',
'default': 0,
'type': 'enabler',
'radio_group': 'nzb',
},
{
'name': 'host',
'default': 'localhost:8080',
},
{
'name': 'api_key',
'label': 'Api Key',
'description': 'Used for all calls to Sabnzbd.',
},
{
'name': 'category',
'label': 'Category',
'description': 'The category CP places the nzb in. Like <strong>movies</strong> or <strong>couchpotato</strong>',
},
{
'name': 'manual',
'default': False,
'type': 'bool',
'advanced': True,
'description': 'Disable this downloader for automated searches, but use it when I manually send a release.',
},
{
'name': 'delete_failed',
'default': True,
'type': 'bool',
'description': 'Delete a release after the download has failed.',
},
],
}
],
}]
| coolbombom/CouchPotatoServer | couchpotato/core/downloaders/sabnzbd/__init__.py | Python | gpl-3.0 | 1,774 |
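The config object above is plain nested data. A small self-contained sketch of walking the same shape to list option names and defaults (trimmed to two options):

config = [{
    'name': 'sabnzbd',
    'groups': [{
        'tab': 'downloaders',
        'label': 'Sabnzbd',
        'options': [
            {'name': 'enabled', 'default': 0},
            {'name': 'host', 'default': 'localhost:8080'},
        ],
    }],
}]

for section in config:
    for group in section['groups']:
        print(group['label'])
        for option in group['options']:
            print('  %s (default: %r)' % (option['name'], option.get('default')))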
from django.db import models
from django.contrib.auth.models import User
class UserProfile(models.Model):
# Links UserProfile to a User model instance.
user = models.OneToOneField(User)
# The additional attributes.
website = models.URLField(blank=True)
# picture = models.ImageField(upload_to='profile_images', blank=True)
def __unicode__(self):
return self.user.username
class CommonModel(models.Model):
created_at = models.DateTimeField(auto_now_add=True)
updated_at = models.DateTimeField(auto_now=True)
class Meta:
abstract = True
class Organization(CommonModel):
"""i.e. Mirman. To group the quizzes."""
name = models.CharField(max_length=150, unique=True)
def __unicode__(self):
return self.name
class Quiz(CommonModel):
"""Groups the questions."""
user = models.ForeignKey(User)
name = models.CharField(max_length=100)
organization = models.ForeignKey(Organization, blank=True, null=True)
# version = models.PositiveIntegerField(default=1)
class Meta:
unique_together = ("user", "name")
def __unicode__(self):
return self.name
class QuizLog(CommonModel):
"""Log with foreign keys"""
quiz = models.ForeignKey(Quiz)
taker = models.ForeignKey(User, null=True)
MESSAGE_CHOICES = (('STARTED', 'STARTED'), ('COMPLETED', 'COMPLETED'))
message = models.CharField(max_length=10, choices=MESSAGE_CHOICES)
class RawLog(CommonModel):
"""Log without foreign keys"""
message = models.CharField(max_length=100)
def __unicode__(self):
return u"%s %s" % (self.created_at.strftime('%c'),
self.message)
class Question(CommonModel):
quiz = models.ForeignKey(Quiz)
text = models.CharField(max_length=255,
verbose_name='question text',)
class Meta:
unique_together = ("text", "quiz")
@property
def answers(self):
return self.answer_set.all()
@property
def tags(self):
# return self.questiontag_set.all()
return [qt.tag for qt in self.questiontag_set.all()]
def __unicode__(self):
return self.text
class Answer(CommonModel):
question = models.ForeignKey(Question)
text = models.CharField(max_length=50)
correct = models.BooleanField(default=False)
notes = models.CharField(max_length=100, blank=True)
def __unicode__(self):
return self.text
class Meta:
unique_together = ("question", "text")
class Tag(CommonModel):
text = models.CharField(max_length=25, unique=True)
def __unicode__(self):
return self.text
class QuestionTag(CommonModel):
question = models.ForeignKey(Question)
tag = models.ForeignKey(Tag)
def __unicode__(self):
return "{}...{}".format(self.question.text[:30], self.tag.text)
class Meta:
unique_together = ("question", "tag")
| hillscottc/quiz2 | quiz2/apps/quiz/models.py | Python | agpl-3.0 | 2,939 |
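A sketch of exercising these models from a Django shell; the user credentials are placeholders:

from django.contrib.auth.models import User
from quiz2.apps.quiz.models import Quiz, Question, Answer, Tag, QuestionTag

user = User.objects.create_user('alice', 'alice@example.com', 'secret')
quiz = Quiz.objects.create(user=user, name='Capitals')
question = Question.objects.create(quiz=quiz, text='Capital of France?')
Answer.objects.create(question=question, text='Paris', correct=True)
Answer.objects.create(question=question, text='Lyon')
tag = Tag.objects.create(text='geography')
QuestionTag.objects.create(question=question, tag=tag)
print(question.answers.count())  # 2, via the `answers` property
print(question.tags)             # [<Tag: geography>]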
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import absolute_import, division, unicode_literals
class Playlists(object):
def playlist_ids(self):
"""
Returns an iterator of all Plex ids of playlists.
"""
self.cursor.execute('SELECT plex_id FROM playlists')
return (x[0] for x in self.cursor)
def kodi_playlist_paths(self):
"""
Returns an iterator of all Kodi playlist paths.
"""
self.cursor.execute('SELECT kodi_path FROM playlists')
return (x[0] for x in self.cursor)
def delete_playlist(self, playlist):
"""
Removes the entry for playlist [Playqueue_Object] from the Plex
playlists table.
        Be sure to either set playlist.plex_id or playlist.kodi_path
"""
if playlist.plex_id:
query = 'DELETE FROM playlists WHERE plex_id = ?'
var = playlist.plex_id
elif playlist.kodi_path:
query = 'DELETE FROM playlists WHERE kodi_path = ?'
var = playlist.kodi_path
else:
raise RuntimeError('Cannot delete playlist: %s' % playlist)
self.cursor.execute(query, (var, ))
def add_playlist(self, playlist):
"""
Inserts or modifies an existing entry in the Plex playlists table.
"""
query = '''
INSERT OR REPLACE INTO playlists(
plex_id,
plex_name,
plex_updatedat,
kodi_path,
kodi_type,
kodi_hash)
VALUES (?, ?, ?, ?, ?, ?)
'''
self.cursor.execute(
query,
(playlist.plex_id,
playlist.plex_name,
playlist.plex_updatedat,
playlist.kodi_path,
playlist.kodi_type,
playlist.kodi_hash))
def playlist(self, playlist, plex_id=None, path=None):
"""
Returns a complete Playlist (empty one passed in via playlist) for the
entry with plex_id OR kodi_path.
Returns None if not found
"""
query = 'SELECT * FROM playlists WHERE %s = ? LIMIT 1'
if plex_id:
query = query % 'plex_id'
var = plex_id
elif path:
query = query % 'kodi_path'
var = path
self.cursor.execute(query, (var, ))
answ = self.cursor.fetchone()
if not answ:
return
playlist.plex_id = answ[0]
playlist.plex_name = answ[1]
playlist.plex_updatedat = answ[2]
playlist.kodi_path = answ[3]
playlist.kodi_type = answ[4]
playlist.kodi_hash = answ[5]
return playlist
def all_kodi_paths(self):
"""
Returns a generator for all kodi_paths of all synched playlists
"""
self.cursor.execute('SELECT kodi_path FROM playlists')
return (x[0] for x in self.cursor)
def wipe_playlists(self):
"""
Deletes all entries in the playlists table
"""
self.cursor.execute('DELETE FROM playlists')
| croneter/PlexKodiConnect | resources/lib/plex_db/playlists.py | Python | gpl-2.0 | 3,096 |
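Playlists is written as a mixin that expects a DB-API cursor on self. A minimal sketch that satisfies that contract with an in-memory SQLite database; the table schema is inferred from the INSERT in add_playlist() and is an assumption, as is the import path:

import sqlite3
from resources.lib.plex_db.playlists import Playlists  # path per repo layout

class PlexDB(Playlists):
    def __init__(self, cursor):
        self.cursor = cursor

conn = sqlite3.connect(':memory:')
cursor = conn.cursor()
# Schema inferred from the columns used in add_playlist() -- an assumption.
cursor.execute('''CREATE TABLE playlists(
    plex_id INTEGER PRIMARY KEY, plex_name TEXT, plex_updatedat INTEGER,
    kodi_path TEXT, kodi_type TEXT, kodi_hash TEXT)''')
db = PlexDB(cursor)
print(list(db.playlist_ids()))  # [] -- nothing synced yet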
bind = 'unix:/tmp/gunicorn.sock'
# bind = '127.0.0.1:8080'
workers = 1 | TornikeNatsvlishvili/GeorgianAutoComplete | backend/gunicorn.py | Python | mit | 70 |
#!/usr/bin/env python3
# Copyright (c) 2014-2016 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
# Test node handling
#
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import *
import http.client
import urllib.parse
class NodeHandlingTest (BitcoinTestFramework):
def run_test(self):
###########################
# setban/listbanned tests #
###########################
assert_equal(len(self.nodes[2].getpeerinfo()), 4) #we should have 4 nodes at this point
self.nodes[2].setban("127.0.0.1", "add")
        time.sleep(3) #wait till the nodes are disconnected
assert_equal(len(self.nodes[2].getpeerinfo()), 0) #all nodes must be disconnected at this point
assert_equal(len(self.nodes[2].listbanned()), 1)
self.nodes[2].clearbanned()
assert_equal(len(self.nodes[2].listbanned()), 0)
self.nodes[2].setban("127.0.0.0/24", "add")
assert_equal(len(self.nodes[2].listbanned()), 1)
try:
self.nodes[2].setban("127.0.0.1", "add") #throws exception because 127.0.0.1 is within range 127.0.0.0/24
except:
pass
assert_equal(len(self.nodes[2].listbanned()), 1) #still only one banned ip because 127.0.0.1 is within the range of 127.0.0.0/24
try:
self.nodes[2].setban("127.0.0.1", "remove")
except:
pass
assert_equal(len(self.nodes[2].listbanned()), 1)
self.nodes[2].setban("127.0.0.0/24", "remove")
assert_equal(len(self.nodes[2].listbanned()), 0)
self.nodes[2].clearbanned()
assert_equal(len(self.nodes[2].listbanned()), 0)
##test persisted banlist
self.nodes[2].setban("127.0.0.0/32", "add")
self.nodes[2].setban("127.0.0.0/24", "add")
self.nodes[2].setban("192.168.0.1", "add", 1) #ban for 1 seconds
self.nodes[2].setban("2001:4d48:ac57:400:cacf:e9ff:fe1d:9c63/19", "add", 1000) #ban for 1000 seconds
listBeforeShutdown = self.nodes[2].listbanned()
assert_equal("192.168.0.1/32", listBeforeShutdown[2]['address']) #must be here
time.sleep(2) #make 100% sure we expired 192.168.0.1 node time
#stop node
stop_node(self.nodes[2], 2)
self.nodes[2] = start_node(2, self.options.tmpdir)
listAfterShutdown = self.nodes[2].listbanned()
assert_equal("127.0.0.0/24", listAfterShutdown[0]['address'])
assert_equal("127.0.0.0/32", listAfterShutdown[1]['address'])
assert_equal("/19" in listAfterShutdown[2]['address'], True)
###########################
# RPC disconnectnode test #
###########################
url = urllib.parse.urlparse(self.nodes[1].url)
self.nodes[0].disconnectnode(url.hostname+":"+str(p2p_port(1)))
time.sleep(2) #disconnecting a node needs a little bit of time
for node in self.nodes[0].getpeerinfo():
assert(node['addr'] != url.hostname+":"+str(p2p_port(1)))
connect_nodes_bi(self.nodes,0,1) #reconnect the node
found = False
for node in self.nodes[0].getpeerinfo():
if node['addr'] == url.hostname+":"+str(p2p_port(1)):
found = True
assert(found)
if __name__ == '__main__':
    NodeHandlingTest().main()
| terracoin/terracoin | qa/rpc-tests/nodehandling.py | Python | mit | 3,440 |
from flask.ext.security import current_user
def check_admin():
    result = current_user and current_user.has_role('Administrator')
    print("ADMIN CHECK")
    print("RESULT = %s" % str(result))
    return result
| UMDIEEE/ieee-web | IEEETestbankApp/views/admin/admin.py | Python | gpl-3.0 | 248 |
# -*- coding: utf-8 -*-
"""
|openid| Providers
----------------------------------
Providers which implement the |openid|_ protocol based on the
`python-openid`_ library.
.. warning::
This providers are dependent on the |pyopenid|_ package.
.. autosummary::
OpenID
Yahoo
Google
"""
# We need absolute import to import from openid library which has the same
# name as this module
from __future__ import absolute_import
import datetime
import logging
import time
from openid import oidutil
from openid.consumer import consumer
from openid.extensions import ax, pape, sreg
from openid.association import Association
from authomatic import providers
from authomatic.exceptions import FailureError, CancellationError, OpenIDError
__all__ = ['OpenID', 'Yahoo', 'Google']
# Suppress openid logging.
oidutil.log = lambda message, level=0: None
REALM_HTML = \
"""
<!DOCTYPE html>
<html>
<head>
<meta http-equiv="X-XRDS-Location" content="{xrds_location}" />
</head>
<body>{body}</body>
</html>
"""
XRDS_XML = \
"""
<?xml version="1.0" encoding="UTF-8"?>
<xrds:XRDS
xmlns:xrds="xri://$xrds"
xmlns:openid="http://openid.net/xmlns/1.0"
xmlns="xri://$xrd*($v*2.0)">
<XRD>
<Service priority="1">
<Type>http://specs.openid.net/auth/2.0/return_to</Type>
<URI>{return_to}</URI>
</Service>
</XRD>
</xrds:XRDS>
"""
class SessionOpenIDStore(object):
"""
A very primitive session-based implementation of the.
:class:`openid.store.interface.OpenIDStore` interface of the
`python-openid`_ library.
.. warning::
Nonces get verified only by their timeout. Use on your own risk!
"""
@staticmethod
def _log(level, message):
return None
ASSOCIATION_KEY = ('authomatic.providers.openid.SessionOpenIDStore:'
'association')
def __init__(self, session, nonce_timeout=None):
"""
:param int nonce_timeout:
Nonces older than this in seconds will be considered expired.
Default is 600.
"""
self.session = session
self.nonce_timeout = nonce_timeout or 600
def storeAssociation(self, server_url, association):
self._log(logging.DEBUG,
'SessionOpenIDStore: Storing association to session.')
serialized = association.serialize()
decoded = serialized.decode('latin-1')
assoc = decoded
# assoc = serialized
# Always store only one association as a tuple.
self.session[self.ASSOCIATION_KEY] = (server_url, association.handle,
assoc)
def getAssociation(self, server_url, handle=None):
# Try to get association.
assoc = self.session.get(self.ASSOCIATION_KEY)
if assoc and assoc[0] == server_url:
# If found deserialize and return it.
self._log(logging.DEBUG, u'SessionOpenIDStore: Association found.')
return Association.deserialize(assoc[2].encode('latin-1'))
else:
self._log(logging.DEBUG,
u'SessionOpenIDStore: Association not found.')
def removeAssociation(self, server_url, handle):
# Just inform the caller that it's gone.
return True
def useNonce(self, server_url, timestamp, salt):
# Evaluate expired nonces as false.
age = int(time.time()) - int(timestamp)
if age < self.nonce_timeout:
return True
else:
self._log(logging.ERROR, u'SessionOpenIDStore: Expired nonce!')
return False
class OpenID(providers.AuthenticationProvider):
"""
|openid|_ provider based on the `python-openid`_ library.
"""
AX = ['http://axschema.org/contact/email',
'http://schema.openid.net/contact/email',
'http://axschema.org/namePerson',
'http://openid.net/schema/namePerson/first',
'http://openid.net/schema/namePerson/last',
'http://openid.net/schema/gender',
'http://openid.net/schema/language/pref',
'http://openid.net/schema/contact/web/default',
'http://openid.net/schema/media/image',
'http://openid.net/schema/timezone']
AX_REQUIRED = ['http://schema.openid.net/contact/email']
SREG = ['nickname',
'email',
'fullname',
'dob',
'gender',
'postcode',
'country',
'language',
'timezone']
PAPE = [
'http://schemas.openid.net/pape/policies/2007/06/'
'multi-factor-physical',
'http://schemas.openid.net/pape/policies/2007/06/multi-factor',
'http://schemas.openid.net/pape/policies/2007/06/phishing-resistant'
]
def __init__(self, *args, **kwargs):
"""
Accepts additional keyword arguments:
:param store:
Any object which implements
:class:`openid.store.interface.OpenIDStore`
of the `python-openid`_ library.
:param bool use_realm:
Whether to use `OpenID realm
<http://openid.net/specs/openid-authentication-2_0-12.html#realms>`_
If ``True`` the realm HTML document will be accessible at
``{current url}?{realm_param}={realm_param}``
e.g. ``http://example.com/path?realm=realm``.
:param str realm_body:
Contents of the HTML body tag of the realm.
:param str realm_param:
Name of the query parameter to be used to serve the realm.
:param str xrds_param:
The name of the query parameter to be used to serve the
`XRDS document
<http://openid.net/specs/openid-authentication-2_0-12.html#XRDS_Sample>`_.
:param list sreg:
List of strings of optional
`SREG
<http://openid.net/specs/openid-simple-registration-extension-1_0.html>`_
fields.
Default = :attr:`OpenID.SREG`.
:param list sreg_required:
List of strings of required
`SREG
<http://openid.net/specs/openid-simple-registration-extension-1_0.html>`_
fields.
Default = ``[]``.
:param list ax:
List of strings of optional
`AX
<http://openid.net/specs/openid-attribute-exchange-1_0.html>`_
schemas.
Default = :attr:`OpenID.AX`.
:param list ax_required:
List of strings of required
`AX
<http://openid.net/specs/openid-attribute-exchange-1_0.html>`_
schemas.
Default = :attr:`OpenID.AX_REQUIRED`.
:param list pape:
of requested
`PAPE
<http://openid.net/specs/openid-provider-authentication-policy-extension-1_0.html>`_
policies.
Default = :attr:`OpenID.PAPE`.
As well as those inherited from :class:`.AuthenticationProvider`
constructor.
"""
super(OpenID, self).__init__(*args, **kwargs)
# Allow for other openid store implementations.
self.store = self._kwarg(
kwargs, 'store', SessionOpenIDStore(
self.session))
# Realm
self.use_realm = self._kwarg(kwargs, 'use_realm', True)
self.realm_body = self._kwarg(kwargs, 'realm_body', '')
self.realm_param = self._kwarg(kwargs, 'realm_param', 'realm')
self.xrds_param = self._kwarg(kwargs, 'xrds_param', 'xrds')
# SREG
self.sreg = self._kwarg(kwargs, 'sreg', self.SREG)
self.sreg_required = self._kwarg(kwargs, 'sreg_required', [])
# AX
self.ax = self._kwarg(kwargs, 'ax', self.AX)
self.ax_required = self._kwarg(kwargs, 'ax_required', self.AX_REQUIRED)
# add required schemas to schemas if not already there
for i in self.ax_required:
if i not in self.ax:
self.ax.append(i)
# PAPE
self.pape = self._kwarg(kwargs, 'pape', self.PAPE)
@staticmethod
def _x_user_parser(user, data):
user.first_name = data.get('ax', {}).get(
'http://openid.net/schema/namePerson/first')
user.last_name = data.get('ax', {}).get(
'http://openid.net/schema/namePerson/last')
user.id = data.get('guid')
user.link = data.get('ax', {}).get(
'http://openid.net/schema/contact/web/default')
user.picture = data.get('ax', {}).get(
'http://openid.net/schema/media/image')
user.nickname = data.get('sreg', {}).get('nickname')
user.country = data.get('sreg', {}).get('country')
user.postal_code = data.get('sreg', {}).get('postcode')
user.name = data.get('sreg', {}).get('fullname') or \
data.get('ax', {}).get('http://axschema.org/namePerson')
user.gender = data.get('sreg', {}).get('gender') or \
data.get('ax', {}).get('http://openid.net/schema/gender')
user.locale = data.get('sreg', {}).get('language') or \
data.get('ax', {}).get('http://openid.net/schema/language/pref')
user.timezone = data.get('sreg', {}).get('timezone') or \
data.get('ax', {}).get('http://openid.net/schema/timezone')
user.email = data.get('sreg', {}).get('email') or \
data.get('ax', {}).get('http://axschema.org/contact/email') or \
data.get('ax', {}).get('http://schema.openid.net/contact/email')
if data.get('sreg', {}).get('dob'):
user.birth_date = datetime.datetime.strptime(
data.get('sreg', {}).get('dob'),
'%Y-%m-%d'
)
else:
user.birth_date = None
return user
@providers.login_decorator
def login(self):
# Instantiate consumer
self.store._log = self._log
oi_consumer = consumer.Consumer(self.session, self.store)
# handle realm and XRDS if there is only one query parameter
if self.use_realm and len(self.params) == 1:
realm_request = self.params.get(self.realm_param)
xrds_request = self.params.get(self.xrds_param)
else:
realm_request = None
xrds_request = None
# determine type of request
if realm_request:
# =================================================================
# Realm HTML
# =================================================================
self._log(
logging.INFO,
u'Writing OpenID realm HTML to the response.')
xrds_location = '{u}?{x}={x}'.format(u=self.url, x=self.xrds_param)
self.write(
REALM_HTML.format(
xrds_location=xrds_location,
body=self.realm_body))
elif xrds_request:
# =================================================================
# XRDS XML
# =================================================================
self._log(
logging.INFO,
u'Writing XRDS XML document to the response.')
self.set_header('Content-Type', 'application/xrds+xml')
self.write(XRDS_XML.format(return_to=self.url))
elif self.params.get('openid.mode'):
# =================================================================
# Phase 2 after redirect
# =================================================================
self._log(
logging.INFO,
u'Continuing OpenID authentication procedure after redirect.')
# complete the authentication process
response = oi_consumer.complete(self.params, self.url)
# on success
if response.status == consumer.SUCCESS:
data = {}
# get user ID
data['guid'] = response.getDisplayIdentifier()
self._log(logging.INFO, u'Authentication successful.')
# get user data from AX response
ax_response = ax.FetchResponse.fromSuccessResponse(response)
if ax_response and ax_response.data:
self._log(logging.INFO, u'Got AX data.')
ax_data = {}
# convert iterable values to their first item
for k, v in ax_response.data.items():
if v and isinstance(v, (list, tuple)):
ax_data[k] = v[0]
data['ax'] = ax_data
# get user data from SREG response
sreg_response = sreg.SRegResponse.fromSuccessResponse(response)
if sreg_response and sreg_response.data:
self._log(logging.INFO, u'Got SREG data.')
data['sreg'] = sreg_response.data
# get data from PAPE response
pape_response = pape.Response.fromSuccessResponse(response)
if pape_response and pape_response.auth_policies:
self._log(logging.INFO, u'Got PAPE data.')
data['pape'] = pape_response.auth_policies
# create user
self._update_or_create_user(data)
# =============================================================
# We're done!
# =============================================================
elif response.status == consumer.CANCEL:
raise CancellationError(
u'User cancelled the verification of ID "{0}"!'.format(
response.getDisplayIdentifier()))
elif response.status == consumer.FAILURE:
raise FailureError(response.message)
elif self.identifier: # As set in AuthenticationProvider.__init__
# =================================================================
# Phase 1 before redirect
# =================================================================
self._log(
logging.INFO,
u'Starting OpenID authentication procedure.')
# get AuthRequest object
try:
auth_request = oi_consumer.begin(self.identifier)
except consumer.DiscoveryFailure as e:
raise FailureError(
u'Discovery failed for identifier {0}!'.format(
self.identifier
),
url=self.identifier,
original_message=e.message)
self._log(
logging.INFO,
u'Service discovery for identifier {0} successful.'.format(
self.identifier))
# add SREG extension
# we need to remove required fields from optional fields because
# addExtension then raises an error
self.sreg = [i for i in self.sreg if i not in self.sreg_required]
auth_request.addExtension(
sreg.SRegRequest(
optional=self.sreg,
required=self.sreg_required)
)
# add AX extension
ax_request = ax.FetchRequest()
# set AX schemas
for i in self.ax:
required = i in self.ax_required
ax_request.add(ax.AttrInfo(i, required=required))
auth_request.addExtension(ax_request)
# add PAPE extension
auth_request.addExtension(pape.Request(self.pape))
# prepare realm and return_to URLs
if self.use_realm:
realm = return_to = '{u}?{r}={r}'.format(
u=self.url, r=self.realm_param)
else:
realm = return_to = self.url
if auth_request.shouldSendRedirect():
# can be redirected
url = auth_request.redirectURL(realm, return_to)
self._log(
logging.INFO,
u'Redirecting user to {0}.'.format(url))
self.redirect(url)
else:
# must be sent as POST
# this writes a html post form with auto-submit
self._log(
logging.INFO,
u'Writing an auto-submit HTML form to the response.')
form = auth_request.htmlMarkup(
realm, return_to, False, dict(
id='openid_form'))
self.write(form)
else:
raise OpenIDError('No identifier specified!')
class Yahoo(OpenID):
"""
Yahoo :class:`.OpenID` provider with the :attr:`.identifier` predefined to
``"me.yahoo.com"``.
"""
identifier = 'me.yahoo.com'
class Google(OpenID):
"""
Google :class:`.OpenID` provider with the :attr:`.identifier` predefined to
``"https://www.google.com/accounts/o8/id"``.
"""
identifier = 'https://www.google.com/accounts/o8/id'
| jasco/authomatic | authomatic/providers/openid.py | Python | mit | 17,262 |
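A sketch of registering these providers in an authomatic config dict; the provider keys and secret are placeholders, and the framework adapter is omitted:

import authomatic
from authomatic.providers import openid

CONFIG = {
    'oi': {'class_': openid.OpenID},         # generic provider defined above
    'google_oi': {'class_': openid.Google},  # identifier preset for Google
}
auth = authomatic.Authomatic(config=CONFIG, secret='replace-with-random-secret')
# Inside a request handler one would then call, for example:
# result = auth.login(adapter, 'oi', identifier='me.yahoo.com')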
# -*- encoding: utf-8 -*-
# Copyright © 2012 New Dream Network, LLC (DreamHost)
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import keystonemiddleware.audit as audit_middleware
from oslo_config import cfg
import oslo_middleware.cors as cors_middleware
import pecan
from ironic.api import config
from ironic.api.controllers import base
from ironic.api import hooks
from ironic.api import middleware
from ironic.api.middleware import auth_token
from ironic.common import exception
from ironic.conf import CONF
def get_pecan_config():
# Set up the pecan configuration
filename = config.__file__.replace('.pyc', '.py')
return pecan.configuration.conf_from_file(filename)
def setup_app(pecan_config=None, extra_hooks=None):
    # Resolve the config before it is dereferenced for the hook list below.
    if not pecan_config:
        pecan_config = get_pecan_config()
    app_hooks = [hooks.ConfigHook(),
                 hooks.DBHook(),
                 hooks.ContextHook(pecan_config.app.acl_public_routes),
                 hooks.RPCHook(),
                 hooks.NoExceptionTracebackHook(),
                 hooks.PublicUrlHook()]
    if extra_hooks:
        app_hooks.extend(extra_hooks)
    pecan.configuration.set_config(dict(pecan_config), overwrite=True)
app = pecan.make_app(
pecan_config.app.root,
static_root=pecan_config.app.static_root,
debug=CONF.pecan_debug,
force_canonical=getattr(pecan_config.app, 'force_canonical', True),
hooks=app_hooks,
wrap_app=middleware.ParsableErrorMiddleware,
)
if CONF.audit.enabled:
try:
app = audit_middleware.AuditMiddleware(
app,
audit_map_file=CONF.audit.audit_map_file,
ignore_req_list=CONF.audit.ignore_req_list
)
except (EnvironmentError, OSError,
audit_middleware.PycadfAuditApiConfigError) as e:
raise exception.InputFileError(
file_name=CONF.audit.audit_map_file,
reason=e
)
if CONF.auth_strategy == "keystone":
app = auth_token.AuthTokenMiddleware(
app, dict(cfg.CONF),
public_api_routes=pecan_config.app.acl_public_routes)
# Create a CORS wrapper, and attach ironic-specific defaults that must be
# included in all CORS responses.
app = cors_middleware.CORS(app, CONF)
app.set_latent(
allow_headers=[base.Version.max_string, base.Version.min_string,
base.Version.string],
allow_methods=['GET', 'PUT', 'POST', 'DELETE', 'PATCH'],
expose_headers=[base.Version.max_string, base.Version.min_string,
base.Version.string]
)
return app
class VersionSelectorApplication(object):
def __init__(self):
pc = get_pecan_config()
self.v1 = setup_app(pecan_config=pc)
def __call__(self, environ, start_response):
return self.v1(environ, start_response)
| ruyang/ironic | ironic/api/app.py | Python | apache-2.0 | 3,479 |
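VersionSelectorApplication is a plain WSGI callable. A sketch of serving it with the stdlib server; the server choice is an assumption, and 6385 is merely Ironic's conventional API port:

from wsgiref.simple_server import make_server
from ironic.api.app import VersionSelectorApplication

application = VersionSelectorApplication()  # builds the pecan app tree
make_server('127.0.0.1', 6385, application).serve_forever()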
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Copyright 2007 Philippe LAWRENCE
#
# This file is part of pyBar.
# pyBar is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# pyBar is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with pyBar; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
import configparser, os
import Const
class Singleton(object):
def __new__(cls, *args, **kwargs):
if '_inst' not in vars(cls):
            # object.__new__() rejects extra arguments in Python 3
            cls._inst = object.__new__(cls)
return cls._inst
class UserPrefs(Singleton):
def __init__(self):
file = Const.FILEPREFS
path = Const.PATH
self.config = configparser.ConfigParser()
self.file = os.path.join(path, file)
if os.path.exists(self.file):
            with open(self.file) as fp:
                self.config.read_file(fp)
else:
self._ini_config_file()
def _ini_config_file(self):
"""Initialise la configuration si le fichier n'est pas trouvé"""
self.config.add_section('Section_w1')
self.config.add_section('Section_w2')
self.config.add_section('Section_w3')
self.config.add_section('Section_units')
self.config.add_section('Section_file')
def get_default_path(self):
try:
return self.config.get('Section_file', 'last_opened')
except configparser.NoOptionError:
return Const.PATH
def save_default_path(self, path):
if not self.config.has_section('Section_file'):
self.config.add_section('Section_file')
self.config.set('Section_file', 'last_opened', '%s' % path)
def get_w1_box(self):
try:
option = self.config.get('Section_w1', 'display_combi_box')
except configparser.NoOptionError:
option = 'on'
if option == 'on':
return True
return False
def get_version(self):
"""Retourne l'option pour la recherche des nouvelles versions"""
try:
option = self.config.getint('Section_w1', 'new_version')
except configparser.NoOptionError:
option = 0
return option
def save_version(self, val):
"""Enregistre l'option pour la recherche de la nouvelle version"""
if val < 0:
val = 0
if not self.config.has_section('Section_w1'):
self.config.add_section('Section_w1')
try:
self.config.set('Section_w1', 'new_version', '%s' % val)
except KeyboardInterrupt:
return
try:
            with open(self.file, "w") as fp:
                self.config.write(fp)
        except IOError:
            print("Error writing the preferences file")
def get_w1_options(self):
options = {}
options['Node'] = self.get_w1_options1()
options['Barre'] = self.get_w1_options2()
options['Axis'] = self.get_w1_options3()
options['Title'] = self.get_w1_options4()
options['Series'] = self.get_w1_options5()
return options
def get_w1_options1(self):
try:
option = self.config.get('Section_w1', 'display_node_name')
except configparser.NoOptionError:
option = 'on'
if option == 'on':
return True
return False
def get_w1_options2(self):
try:
option = self.config.get('Section_w1', 'display_barre_name')
except configparser.NoOptionError:
option = 'on'
if option == 'on':
return True
return False
def get_w1_options3(self):
try:
option = self.config.get('Section_w1', 'display_axis')
except configparser.NoOptionError:
option = 'off'
if option == 'on':
return True
return False
def get_w1_options4(self):
try:
option = self.config.get('Section_w1', 'display_title')
except configparser.NoOptionError:
option = 'on'
if option == 'on':
return True
return False
def get_w1_options5(self):
try:
option = self.config.get('Section_w1', 'display_series')
except configparser.NoOptionError:
option = 'off'
if option == 'on':
return True
return False
def get_w1_size(self):
try:
return self.config.getint('Section_w1', 'w1_w'), self.config.getint('Section_w1', 'w1_h')
except configparser.NoOptionError:
return None
def get_default_g(self):
try:
return self.config.getfloat('Section_units', "g")
except configparser.NoOptionError:
return Const.G
def get_default_conv(self):
try:
return self.config.getfloat('Section_units', "conv")
except configparser.NoOptionError:
return Const.CONV
def get_default_units(self):
if not self.config.has_section('Section_units'): return {}
di = {}
for i in ['L', 'C', 'E', 'F', 'I', 'M', 'S']:
try:
val = self.config.getfloat('Section_units', i)
except configparser.NoOptionError:
val = 1.
di[i] = val
return di
def save_default_units(self, data):
units = data.unit_conv
g = data.G
if not self.config.has_section('Section_units'):
self.config.add_section('Section_units')
for unit, val in list(units.items()):
self.config.set('Section_units', unit, '%s' % val)
self.config.set('Section_units', "g", '%s' % g)
conv = data.conv
self.config.set('Section_units', "conv", '%s' % conv)
def save_w1_config(self, w, h, display_box, options):
if not self.config.has_section('Section_w1'):
self.config.add_section('Section_w1')
self.config.set('Section_w1', 'w1_w', '%s' % w)
self.config.set('Section_w1', 'w1_h', '%s' % h)
self.config.set('Section_w1', 'display_combi_box', display_box)
has_title = 'on'
if not options.get('Title', 'on'): has_title = 'off'
self.config.set('Section_w1', 'display_title', has_title)
has_node = 'off'
if options.get('Node'): has_node = 'on'
self.config.set('Section_w1', 'display_node_name', has_node)
has_barre = 'off'
if options.get('Barre'): has_barre = 'on'
self.config.set('Section_w1', 'display_barre_name', has_barre)
has_axis = 'off'
if options.get('Axis'):
has_axis = 'on'
self.config.set('Section_w1', 'display_axis', has_axis)
has_series = 'off'
if options.get('Series'):
            has_series = 'on'
self.config.set('Section_w1', 'display_series', has_series)
try:
            with open(self.file, "w") as fp:
                self.config.write(fp)
        except IOError:
            print("Error writing the preferences file")
def get_w2_size(self):
sizes = (self.config.getint('Section_w2', 'w2_w'),
self.config.getint('Section_w2', 'w2_h'))
return sizes
def save_w2_config(self, w, h):
if not self.config.has_section('Section_w2'):
self.config.add_section('Section_w2')
self.config.set('Section_w2', 'w2_w', '%s' % w)
self.config.set('Section_w2', 'w2_h', '%s' % h)
#print self.config.options('Section_units')
#print self.config.getfloat('Section_units', u'l')
try:
            with open(self.file, "w") as fp:
                self.config.write(fp)
        except IOError:
            print("Error writing the preferences file")
def get_w3_size(self):
sizes = (self.config.getint('Section_w3', 'w3_w'),
self.config.getint('Section_w3', 'w3_h'))
return sizes
def save_w3_config(self, w, h):
#print 'UP::save_w3_config', w, h
if not self.config.has_section('Section_w3'):
self.config.add_section('Section_w3')
self.config.set('Section_w3', 'w3_w', '%s' % w)
self.config.set('Section_w3', 'w3_h', '%s' % h)
try:
            with open(self.file, "w") as fp:
                self.config.write(fp)
        except IOError:
            print("Error writing the preferences file")
| Philippe-Lawrence/pyBar | classPrefs.py | Python | gpl-3.0 | 8,101 |
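UserPrefs is a singleton wrapper over a ConfigParser file; Const.FILEPREFS and Const.PATH come from the package and are not redefined here. A usage sketch:

from classPrefs import UserPrefs

prefs = UserPrefs()
assert prefs is UserPrefs()       # Singleton: same instance every time
options = prefs.get_w1_options()  # {'Node': True, 'Barre': True, ...}
print(options)
# Persist a new main-window config (writes the preferences file).
prefs.save_w1_config(800, 600, 'on', options)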