repo_name (stringlengths 6-61) | path (stringlengths 4-230) | copies (stringlengths 1-3) | size (stringlengths 4-6) | text (stringlengths 1.01k-850k) | license (stringclasses, 15 values) | hash (int64, -9,220,477,234,079,998,000 to 9,219,060,020B) | line_mean (float64, 11.6-96.6) | line_max (int64, 32-939) | alpha_frac (float64, 0.26-0.9) | autogenerated (bool, 1 class) | ratio (float64, 1.62-6.1) | config_test (bool, 2 classes) | has_no_keywords (bool, 2 classes) | few_assignments (bool, 1 class) |
---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
cjbrasher/LipidFinder | LipidFinder/Configuration/LFParametersGUI.py | 1 | 38609 | # Copyright (c) 2019 J. Alvarez-Jarreta and C.J. Brasher
#
# This file is part of the LipidFinder software tool and governed by the
# 'MIT License'. Please see the LICENSE file that should have been
# included as part of this software.
"""Graphical User Interface (GUI) to manage the parameters' collection.
"""
from collections import OrderedDict
import os
from IPython.display import display
from ipywidgets import widgets, Layout
import pandas
from LipidFinder.Configuration import LFParameters
from LipidFinder._utils import normalise_path
class _TaggedToggleButton(widgets.ToggleButton):
"""Add "tag" attribute to widgets.ToggleButton class."""
def __init__(self, tag, **kwargs):
widgets.ToggleButton.__init__(self, **kwargs)
self.tag = tag
class _TaggedCheckbox(widgets.Checkbox):
"""Add "tag" attribute to widgets.Checkbox class."""
def __init__(self, tag, **kwargs):
widgets.Checkbox.__init__(self, **kwargs)
self.tag = tag
class _TaggedButton(widgets.Button):
"""Add "tag" attribute to widgets.Button class."""
def __init__(self, tag, **kwargs):
widgets.Button.__init__(self, **kwargs)
self.tag = tag
class LFParametersGUI(LFParameters):
"""A LFParametersGUI object stores a set of LipidFinder parameters
to be used in the specified module.
This subclass of LFParameters implements a graphical interface using
jupyter notebook's widgets, executed during the object creation. It
allows the user to check, change and save each active parameter's
value interactively.
Attributes:
_parameters (Private[collections.OrderedDict])
Dictionary where the parameters and their associated
information are stored.
_floatPointPrecision (Private[int])
Number of digits after the radix point in floats.
_floatStep (Private[float])
Minimum difference between two consecutive float numbers.
_style (Private[dict])
Dictionary with the default style settings for widgets.
_inputWidth (Private[str])
String representation of the default width of input widgets.
_widgets (Private[collections.OrderedDict])
Dictionary where the widgets for each parameter are stored.
Examples:
LFParametersGUI objects can be created as follows:
>>> from Configuration.LFParametersGUI import LFParametersGUI
>>> LFParametersGUI()
>>> LFParametersGUI(src='/home/user/my_parameters.json')
The former will load the default PeakFilter parameters and will
load and display the interface afterwards. The latter will load
the default PeakFilter parameters, override them with the values
found in the JSON file provided, and finally it will load and
display the interface.
Alternatively, a specific module can be introduced as argument:
>>> from Configuration.LFParametersGUI import LFParametersGUI
>>> LFParametersGUI(module='mssearch')
"""
def __init__(self, precision=4, **kwargs):
# type: (int, ...) -> LFParametersGUI
"""Constructor of the class LFParametersGUI.
First, the module's parameters template file is loaded. Next, if
a source JSON parameters file path is provided, the default
values are overwritten by the corresponding new (valid) values.
Finally, the graphical user interface is displayed.
Keyword Arguments:
precision -- number of decimal digits to use with floats
(e.g. a precision of 2 forces a difference of
0.01 between any two consecutive float numbers)
[default: 4]
"""
# Minimum difference between two consecutive float numbers
self._floatPointPrecision = precision
self._floatStep = 10 ** -(precision)
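# Illustrative note (added): with the default precision of 4 this gives
# self._floatStep == 0.0001, the smallest increment the bounded float
# widgets created below will accept.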
# Load the parameters dictionary using parent class' constructor
LFParameters.__init__(self, **kwargs)
# Default style
self._style = {'description_width': '0px'}
# Default width of input widgets
self._inputWidth = '26%'
# Generate an ordered dict to store each parameter's set of
# widgets in the same order as in the parameters' dict
self._widgets = OrderedDict()
# Create every widget of the GUI
for key, data in self._parameters.items():
disabled = not self._is_active(key)
# Load the information of each parameter
self._widgets[key] = [self._create_label(key, disabled),
self._create_help_icon(key, disabled)]
# Create the input widget or container of input widgets for
# each parameter type
if (data['type'] == 'bool'):
self._widgets[key].append(
self._create_bool_widget(key, disabled))
elif (data['type'] == 'int'):
self._widgets[key].append(
self._create_int_widget(key, disabled))
elif (data['type'] == 'float'):
self._widgets[key].append(
self._create_float_widget(key, disabled))
elif (data['type'] == 'selection'):
self._widgets[key].append(
self._create_selection_widget(key, disabled))
elif (data['type'] == 'path'):
self._widgets[key].append(
self._create_path_widget(key, disabled))
elif (data['type'] == 'int range'):
self._widgets[key].append(
self._create_int_range_widget(key, disabled))
elif (data['type'] == 'float range'):
self._widgets[key].append(
self._create_float_range_widget(key, disabled))
elif (data['type'] == 'multiselection'):
self._widgets[key].append(
self._create_multiselection_widget(key, disabled))
elif (data['type'] == 'pairs'):
self._widgets[key].append(
self._create_pairs_widget(key, disabled))
else: # data['type'] == 'str'
self._widgets[key].append(
self._create_str_widget(key, disabled))
# Display the GUI
hboxLayout = Layout(align_items='center')
for key, widgetList in self._widgets.items():
display(widgets.HBox(widgetList, layout=hboxLayout))
# Finally, create the save interface to allow the user to save
# the current parameters values in a JSON file
display(widgets.HBox([], layout=Layout(height='15px')))
display(widgets.HBox([], layout=Layout(height='0px',
border='2px solid lightgray')))
display(widgets.HBox([], layout=Layout(height='2px')))
self._widgets['save'] = self._create_save_widget()
hboxLayout = Layout(justify_content='space-between',
align_items='center')
display(widgets.HBox(self._widgets['save'], layout=hboxLayout))
def _create_label(self, key, disabled):
# type: (str, bool) -> widgets.HTML
"""Return an HTML widget with the parameter's description.
If 'disabled' is False, the text will be in black, otherwise it
will be in gray.
Keyword Arguments:
key -- name of the parameter
disabled -- is the parameter/widget disabled?
"""
text = self._parameters[key]['description']
label = ("<p style=\"font-size:110%; line-height:19px; color:{0};\">{1}"
"</p>").format('Gray' if disabled else 'Black', text)
return widgets.HTML(value=label, style=self._style,
layout=Layout(width='50%'))
def _create_help_icon(self, key, disabled):
# type: (str, bool) -> widgets.HTML
"""Return an HTML widget with the parameter's help as tooltip of
a help icon.
Keyword Arguments:
key -- name of the parameter
disabled -- is the parameter/widget disabled?
"""
if ('help' in self._parameters[key]):
code = ("<link rel=\"stylesheet\" href=\"https://fonts.googleapis.c"
"om/icon?family=Material+Icons\"><i class=\"material-icons"
"\" style=\"color:{0}; font-size:18px; display:inline"
"-flex; vertical-align:middle;\" title=\"{1}\">help</i>"
"").format("SteelBlue", self._parameters[key]['help'])
else:
code = ''
layout = Layout(width='2%',
visibility='hidden' if disabled else 'visible')
return widgets.HTML(value=code, style=self._style, layout=layout)
def _create_str_widget(self, key, disabled):
# type: (str, bool) -> widgets.Text
"""Return a Text widget with the parameter's value.
Keyword Arguments:
key -- name of the parameter
disabled -- is the parameter/widget disabled?
"""
if ('example' in self._parameters[key]):
example = self._parameters[key]['example']
else:
example = ''
inputWidget = widgets.Text(
value=self[key], description=key, placeholder=example,
style=self._style, layout=Layout(width=self._inputWidth),
continuous_update=False, disabled=disabled)
# Add handler for when the "value" trait changes
inputWidget.observe(self._default_handler, names='value')
return inputWidget
def _create_bool_widget(self, key, disabled):
# type: (str, bool) -> widgets.HBox
"""Return an HBox containing a ToggleButton widget to represent
the parameter's value.
Keyword Arguments:
key -- name of the parameter
disabled -- is the parameter/widget disabled?
"""
inputWidget = _TaggedToggleButton(
value=self[key], description='Yes' if self[key] else 'No',
tag=key, style=self._style, layout=Layout(width='50%'),
button_style='primary', disabled=disabled)
# Add handler for when the "value" trait changes
inputWidget.observe(self._bool_handler, names='value')
layout = Layout(width=self._inputWidth, justify_content='center')
return widgets.HBox([inputWidget], layout=layout)
def _create_int_widget(self, key, disabled):
# type: (str, bool) -> widgets.BoundedIntText
"""Return a BoundedIntText widget with the parameter's value.
Keyword Arguments:
key -- name of the parameter
disabled -- is the parameter/widget disabled?
"""
inputWidget = widgets.BoundedIntText(
value=self[key], description=key, min=self._min(key),
max=self._max(key), style=self._style,
layout=Layout(width=self._inputWidth), continuous_update=False,
disabled=disabled)
# Save the widget's value in case its constructor automatically
# replaces an empty one given as argument
self._parameters[key]['value'] = inputWidget.value
# Add handler for when the "value" trait changes
inputWidget.observe(self._default_handler, names='value')
return inputWidget
def _create_float_widget(self, key, disabled):
# type: (str, bool) -> widgets.BoundedFloatText
"""Return a BoundedFloatText widget with the parameter's value.
Keyword Arguments:
key -- name of the parameter
disabled -- is the parameter/widget disabled?
"""
inputWidget = widgets.BoundedFloatText(
value=self[key], description=key, min=self._min(key),
max=self._max(key), step=self._floatStep, style=self._style,
layout=Layout(width=self._inputWidth), continuous_update=False,
disabled=disabled)
# Save the widget's value in case its constructor automatically
# replaces an empty one given as argument
self._parameters[key]['value'] = inputWidget.value
# Add handler for when the "value" trait changes
inputWidget.observe(self._default_handler, names='value')
return inputWidget
def _create_selection_widget(self, key, disabled):
# type: (str, bool) -> widgets.Dropdown
"""Return a Dropdown widget with the parameter's options and its
current value selected.
Keyword Arguments:
key -- name of the parameter
disabled -- is the parameter/widget disabled?
"""
inputWidget = widgets.Dropdown(
options=self._parameters[key]['options'], value=self[key],
description=key, style=self._style,
layout=Layout(width=self._inputWidth), disabled=disabled)
# Add handler for when the "value" trait changes
inputWidget.observe(self._default_handler, names='value')
return inputWidget
def _create_path_widget(self, key, disabled):
# type: (str, bool) -> widgets.HBox
"""Return an HBox containing a Text widget with the parameter's
value.
If the Text widget is enabled and the file does not exist, a
warning icon will be displayed next to it to alert the user.
Keyword Arguments:
key -- name of the parameter
disabled -- is the parameter/widget disabled?
"""
inputWidget = widgets.Text(
value=self[key], description=key, style=self._style,
layout=Layout(width='92%'), continuous_update=False,
disabled=disabled)
# Add handler for when the "value" trait changes
inputWidget.observe(self._path_handler, names='value')
# Create an HTML widget with a warning icon that will be
# displayed if the Text widget is enabled and the file does not
# exist
code = ("<link rel=\"stylesheet\" href=\"https://fonts.googleapis.com/i"
"con?family=Material+Icons\"><i class=\"material-icons\" style="
"\"font-size:18px; color:Red; display:inline-flex; vertical-ali"
"gn:middle;\" title=\"File not found!\">warning</i>")
warn = not disabled and not os.path.isfile(self[key])
layout = Layout(width='5%',
visibility='visible' if warn else 'hidden')
warnWidget = widgets.HTML(value=code, style=self._style, layout=layout)
layout = Layout(width='46%', justify_content='space-between')
return widgets.HBox([inputWidget, warnWidget], layout=layout)
def _create_int_range_widget(self, key, disabled):
# type: (str, bool) -> widgets.HBox
"""Return an HBox containing two BoundedIntText widgets with the
parameter's range values.
The widgets are created to fulfill the "int range" type
condition: lower_bound < upper_bound
Keyword Arguments:
key -- name of the parameter
disabled -- is the parameter/widget disabled?
"""
lowerBound = widgets.BoundedIntText(
value=self[key][0], description=key, min=self._min(key),
max=self[key][1] - 1, style=self._style,
layout=Layout(width='50%'), continuous_update=False,
disabled=disabled)
# Save the widget's value in case its constructor automatically
# replaces an empty one given as argument
self._parameters[key]['value'][0] = lowerBound.value
# Add handler for when the "value" trait changes
lowerBound.observe(self._range_handler, names='value')
upperBound = widgets.BoundedIntText(
value=self[key][1], description=key, min=self[key][0] + 1,
max=self._max(key), style=self._style,
layout=Layout(width='50%'), continuous_update=False,
disabled=disabled)
# Save the widget's value in case its constructor automatically
# replaces an empty one given as argument
self._parameters[key]['value'][1] = upperBound.value
# Add handler for when the "value" trait changes
upperBound.observe(self._range_handler, names='value')
return widgets.HBox([lowerBound, upperBound],
layout=Layout(width=self._inputWidth))
def _create_float_range_widget(self, key, disabled):
# type: (str, bool) -> widgets.HBox
"""Return an HBox containing two BoundedFloatText widgets with
the parameter's range values.
The widgets are created to fulfill the "float range" type
condition: lower_bound < upper_bound
Keyword Arguments:
key -- name of the parameter
disabled -- is the parameter/widget disabled?
"""
lowerBound = widgets.BoundedFloatText(
value=self[key][0], description=key, min=self._min(key),
max=self[key][1] - self._floatStep, step=self._floatStep,
style=self._style, layout=Layout(width='50%'),
continuous_update=False, disabled=disabled)
# Save the widget's value in case its constructor automatically
# replaces an empty one given as argument
self._parameters[key]['value'][0] = lowerBound.value
# Add handler for when the "value" trait changes
lowerBound.observe(self._range_handler, names='value')
upperBound = widgets.BoundedFloatText(
value=self[key][1], description=key,
min=self[key][0] + self._floatStep, max=self._max(key),
step=self._floatStep, style=self._style,
layout=Layout(width='50%'), continuous_update=False,
disabled=disabled)
# Save the widget's value in case its constructor automatically
# replaces an empty one given as argument
self._parameters[key]['value'][1] = upperBound.value
# Add handler for when the "value" trait changes
upperBound.observe(self._range_handler, names='value')
return widgets.HBox([lowerBound, upperBound],
layout=Layout(width=self._inputWidth))
def _create_multiselection_widget(self, key, disabled):
# type: (str, bool) -> widgets.Box
"""Return a Box containing as many Checkbox widgets as
parameter's options, with those in its "value" field checked.
Keyword Arguments:
key -- name of the parameter
disabled -- is the parameter/widget disabled?
"""
itemWidgets = []
for item in self._parameters[key]['options']:
layoutWidth = '23%' if (len(item) <= 10) else '48%'
inputWidget = _TaggedCheckbox(
value=item in self[key], description=item, tag=key,
style=self._style, layout=Layout(width=layoutWidth),
disabled=disabled)
# Add handler for when the "value" trait changes
inputWidget.observe(self._multiselection_handler, names='value')
itemWidgets.append(inputWidget)
layout = Layout(width='46%', display='flex', flex_flow='row wrap',
justify_content='space-between')
return widgets.Box(itemWidgets, layout=layout)
def _create_pairs_widget(self, key, disabled):
# type: (str, bool) -> widgets.HBox
"""Return an HBox containing the interface to add and remove
pairs of available elements.
The term "available elements" refers to those elements in the
first column of the CSV file's path stored under the parameter's
"file" key. Users will not be able to add existing pairs or
pairs formed by the same element twice.
Keyword Arguments:
key -- name of the parameter
disabled -- is the parameter/widget disabled?
"""
# Load the list of available elements from the first column of
# the CSV file saved under the parameter's "file" key
srcFilePath = self[self._parameters[key]['file']]
options = pandas.read_csv(srcFilePath).iloc[:, 0].tolist()
# Create two Select widgets with the list of available elements
leftSelect = widgets.Select(
options=options, rows=4, style=self._style,
layout=Layout(width='20%'), disabled=disabled)
rightSelect = widgets.Select(
options=options, rows=4, style=self._style,
layout=Layout(width='20%'), disabled=disabled)
# Create the add and remove buttons with the handler to add and
# remove pairs, respectively
addButton = _TaggedButton(
description='Pair >>', tooltip='Add new pair', tag=key,
layout=Layout(width='95%'), disabled=disabled)
# Add handler for when the button is clicked
addButton.on_click(self._pairs_add_handler)
delButton = _TaggedButton(
description='<< Remove', tooltip='Remove selected pair',
tag=key, layout=Layout(width='95%'), disabled=disabled)
# Add handler for when the button is clicked
delButton.on_click(self._pairs_del_handler)
layout = Layout(width='21%', justify_content='space-around')
# Hold the buttons in a VBox to get the desired layout
buttonsBox = widgets.VBox([addButton, delButton], layout=layout)
# Create a Select widget with the parameter's list of pairs
pairs = [' , '.join(x) for x in self[key]]
pairsSelect = widgets.Select(
options=pairs, rows=4, style=self._style,
layout=Layout(width='28%'), disabled=disabled)
layout = Layout(width='46%', justify_content='space-around')
return widgets.HBox([leftSelect, rightSelect, buttonsBox, pairsSelect],
layout=layout)
def _create_save_widget(self):
# type: () -> list
"""Return a list containing the interface to save the current
parameters values as a JSON file in an introduced path.
"""
text = ("<p style=\"font-size:110%; line-height:19px; color:Black;\">"
"Where do you want to save the new set of parameters?</p>")
label = widgets.HTML(value=text, style=self._style,
layout=Layout(width='38%'))
# Create the path input widget (Text) with a default path and
# file name
defaultPath = normalise_path("parameters.json")
inputWidget = widgets.Text(
value=defaultPath, placeholder=defaultPath, style=self._style,
layout=Layout(width='40%'), continuous_update=False)
# Add handler for when the "value" trait changes
inputWidget.observe(self._save_path_handler, names='value')
# Create an HTML widget with a warning icon that will be
# displayed if the directory path does not exist
code = ("<link rel=\"stylesheet\" href=\"https://fonts.googleapis.com/i"
"con?family=Material+Icons\"><i class=\"material-icons\" style="
"\"font-size:18px; color:Red; display:inline-flex; vertical-ali"
"gn:middle;\" title=\"Path not found!\">warning</i>")
dirPath = os.path.split(inputWidget.value)[0]
visibility = 'visible' if not os.path.isdir(dirPath) else 'hidden'
layout = Layout(width='2%', visibility=visibility)
warnWidget = widgets.HTML(value=code, style=self._style, layout=layout)
# Create a save button that will be active only if every active
# parameter is valid and the destination path exists
saveButton = widgets.Button(
description='Save', button_style='danger',
tooltip='Save parameters in a JSON file',
layout=Layout(width='12%', height='35px'),
disabled=not self._valid_parameters())
# Add handler for when the button is clicked
saveButton.on_click(self._save_button_handler)
return [label, inputWidget, warnWidget, saveButton]
def _update(self):
# type: () -> None
"""Return an HBox containing the interface to add and remove
pairs of available elements.
The term "available elements" refers to those elements in the
first column of the CSV file's path stored under the parameter's
"file" key. Users will not be able to add existing pairs or
pairs formed by the same element twice. If the CSV file path
changes, the pairs list will be emptied and the set of available
elements will be updated.
"""
# Update the status and/or visibility of each parameter's widget
for key in self._parameters.keys():
interface = self._widgets[key]
disabled = not self._is_active(key)
if (disabled):
interface[0].value = interface[0].value.replace('Black', 'Gray')
else:
interface[0].value = interface[0].value.replace('Gray', 'Black')
interface[1].layout.visibility = 'hidden' if disabled else 'visible'
typeStr = self._parameters[key]['type']
if (typeStr == 'bool'):
interface[2].children[0].disabled = disabled
elif (typeStr in ['int', 'float']):
# Update minimum and maximum bounds too
interface[2].min = self._min(key)
interface[2].max = self._max(key)
interface[2].disabled = disabled
elif (typeStr == 'path'):
interface[2].children[0].disabled = disabled
# Display the warning widget if the parameter is enabled
# and the file does not exist
if (not disabled and not os.path.isfile(self[key])):
interface[2].children[1].layout.visibility = 'visible'
else:
interface[2].children[1].layout.visibility = 'hidden'
elif (typeStr in ['int range', 'float range']):
# Update minimum and maximum bounds of the range too
interface[2].children[0].min = self._min(key)
interface[2].children[0].disabled = disabled
interface[2].children[1].max = self._max(key)
interface[2].children[1].disabled = disabled
elif (typeStr == 'multiselection'):
for child in interface[2].children:
child.disabled = disabled
elif (typeStr == 'pairs'):
interface[2].children[0].disabled = disabled
interface[2].children[1].disabled = disabled
for grandchild in interface[2].children[2].children:
grandchild.disabled = disabled
interface[2].children[3].disabled = disabled
else:
interface[2].disabled = disabled
# Ensure the save button should be available and ready to save
# the new set of parameters
self._widgets['save'][3].description = 'Save'
self._widgets['save'][3].icon = ''
self._widgets['save'][3].disabled = not self._valid_parameters()
def _default_handler(self, change):
# type: (dict) -> None
"""Handle the "value" trait change assigning the new value to
the corresponding parameter.
The update() method is launched at the end to ensure every
widget is updated according to the change in this parameter.
Keyword Arguments:
change -- dict holding the information about the change
"""
key = change['owner'].description
self._parameters[key]['value'] = change['new']
self._update()
def _bool_handler(self, change):
# type: (dict) -> None
"""Handle the "value" trait change assigning the new value to
the corresponding "bool" type parameter.
The update() method is launched at the end to ensure every
widget is updated according to the change in this parameter.
Keyword Arguments:
change -- dict holding the information about the change
"""
key = change['owner'].tag
self._parameters[key]['value'] = change['new']
# Change ToggleButton's description to "Yes" or "No" depending
# on whether its new value is True or False, respectively
change['owner'].description = 'Yes' if change['new'] else 'No'
self._update()
def _path_handler(self, change):
# type: (dict) -> None
"""Handle the "value" trait change assigning the new value to
the corresponding "path" type parameter.
The update() method is launched at the end to ensure every
widget is updated according to the change in this parameter.
Keyword Arguments:
change -- dict holding the information about the change
"""
key = change['owner'].description
self._parameters[key]['value'] = normalise_path(change['new'])
# Replace the introduced path by its normalised version to
# provide the user with more information in case there is
# something wrong with the path
change['owner'].value = self[key]
# Get the "pairs" type parameter that has this parameter in its
# "field" key to update the contents of its widgets
for param, data in self._parameters.items():
if ((data['type'] == 'pairs') and (data['file'] == key)):
pairsWidget = self._widgets[param][2]
if (os.path.isfile(self[key])):
# Update the information of available elements
options = pandas.read_csv(self[key]).iloc[:, 0].tolist()
pairsWidget.children[0].options = options
pairsWidget.children[1].options = options
else:
# Since the file does not exist, there are no
# available elements
pairsWidget.children[0].options = []
pairsWidget.children[1].options = []
# Since the file has changed, empty the list of pairs
self._parameters[param]['value'] = []
pairsWidget.children[3].options = []
break
self._update()
def _range_handler(self, change):
# type: (dict) -> None
"""Handle the "value" trait change assigning the new value to
the corresponding "int/float range" type parameter.
The update() method is launched at the end to ensure every
widget is updated according to the change in this parameter.
Keyword Arguments:
change -- dict holding the information about the change
"""
key = change['owner'].description
# Both children have the same step
step = self._widgets[key][2].children[0].step
if (change['owner'].min == self._min(key)):
# Trait changed in the widget corresponding to the lower
# bound of the range
self._parameters[key]['value'][0] = change['new']
self._widgets[key][2].children[1].min = change['new'] + step
else:
# Trait changed in the widget corresponding to the upper
# bound of the range
self._parameters[key]['value'][1] = change['new']
self._widgets[key][2].children[0].max = change['new'] - step
self._update()
def _multiselection_handler(self, change):
# type: (dict) -> None
"""Handle the "value" trait change updating the list of values
of the corresponding "multiselection" type parameter.
The update() method is launched at the end to ensure every
widget is updated according to the change in this parameter.
Keyword Arguments:
change -- dict holding the information about the change
"""
key = change['owner'].tag
if (change['new']):
self._parameters[key]['value'].append(change['owner'].description)
else:
self._parameters[key]['value'].remove(change['owner'].description)
self._update()
def _pairs_add_handler(self, button):
# type: (_TaggedButton) -> None
"""Handle when the button is clicked to add a pair to the
corresponding "pairs" type parameter.
The update() method is launched at the end to ensure every
widget is updated according to the change in this parameter.
Keyword Arguments:
button -- clicked button widget instance
"""
key = button.tag
# Add selected elements in both Selection widgets as a new pair
leftSel = self._widgets[key][2].children[0].value
rightSel = self._widgets[key][2].children[1].value
newPair = [leftSel, rightSel]
# The pairs are considered sets, that is, the order of the
# elements is ignored
if ((leftSel != rightSel) and (newPair not in self[key])
and (newPair[::-1] not in self[key])):
self._parameters[key]['value'].append(newPair)
# Since the "options" field is a tuple, build a new list
# with the new pair
self._widgets[key][2].children[3].options = \
[' , '.join(x) for x in self[key]]
self._update()
def _pairs_del_handler(self, button):
# type: (_TaggedButton) -> None
"""Handle when the button is clicked to remove a pair of the
corresponding "pairs" type parameter.
The update() method is launched at the end to ensure every
widget is updated according to the change in this parameter.
Keyword Arguments:
button -- clicked button widget instance
"""
key = button.tag
pairsWidget = self._widgets[key][2].children[3]
# Get the selected pair from the pairs widget
pairSel = pairsWidget.value
if (pairSel):
pair = pairSel.split(' , ')
self._parameters[key]['value'].remove(pair)
# Since the "options" field is a tuple, build a new list
# without the deleted pair
pairsWidget.options = [' , '.join(x) for x in self[key]]
# Select the first pair to ensure coherence with the change
if (pairsWidget.options):
pairsWidget.value = pairsWidget.options[0]
self._update()
def _save_path_handler(self, change):
# type: (dict) -> None
"""Handle the "value" trait change checking if the path where to
save the parameters values exists.
A warning sign will be displayed if the given directory path
does not exist. The update() method is launched at the end to
ensure every widget is updated according to the change in this
parameter.
Keyword Arguments:
change -- dict holding the information about the change
"""
newPath = normalise_path(change['new'])
dirPath = os.path.split(newPath)[0]
if (not os.path.isdir(dirPath)):
self._widgets['save'][2].layout.visibility = 'visible'
else:
self._widgets['save'][2].layout.visibility = 'hidden'
# Replace the introduced path by its normalised version to
# provide the user with more information in case there is
# something wrong
change['owner'].value = newPath
self._update()
def _save_button_handler(self, button):
# type: (widgets.Button) -> None
"""Handle when the button is clicked to save the parameters
values in a JSON file.
Keyword Arguments:
button -- clicked button widget instance
"""
self.write(self._widgets['save'][1].value)
# Change the button's text to tell the user the JSON parameters
# file has been correctly created
button.description = 'Saved'
button.icon = 'check'
def _min(self, key):
# type: (str) -> object
"""Return the largest value in the parameter's "min" list.
Applies the round() method to the output of LFParameters._min() to
get a more comparable result regarding floating point arithmetic
issues.
Keyword Arguments:
key -- name of the parameter
"""
return round(LFParameters._min(self, key), self._floatPointPrecision)
def _max(self, key):
# type: (str) -> object
"""Return the smallest value in the parameter's "max" list.
Applies the round() method to the output of LFParameters._max() to
get a more comparable result regarding floating point arithmetic
issues.
Keyword Arguments:
key -- name of the parameter
"""
return round(LFParameters._max(self, key), self._floatPointPrecision)
def _valid_parameters(self):
# type: () -> bool
"""Return True if every active parameter has a valid value,
False otherwise.
The list of valid parameters also includes "save" destination
path, where the JSON parameters file will be saved.
"""
enabledKeys = (x for x in self._parameters.keys() if self._is_active(x))
for key in enabledKeys:
data = self._parameters[key]
# Only "multiselection" type parameters can be empty ([])
if ((data['type'] != 'multiselection')
and (data['value'] in [None, '', []])):
return False
# "path" type parameters must be checked manually, whilst
# the rest are already controlled by their widget
if ((data['type'] == 'path') and not os.path.isfile(data['value'])):
return False
# This method is also called when the save interface is being
# created, so the "save" key will not exist yet
if ('save' in self._widgets):
# Check if the directory path where to save the JSON
# parameters file exists
dirPath = os.path.split(self._widgets['save'][1].value)[0]
if (not os.path.isdir(dirPath)):
return False
return True
| mit | -4,986,626,818,987,311,000 | 45.34934 | 80 | 0.596623 | false | 4.573984 | false | false | false |
savioabuga/phoenix-template | phoenix/apps/records/views.py | 1 | 5138 | from django.contrib import messages
from django.shortcuts import HttpResponseRedirect
from django.core.urlresolvers import reverse
from smartmin.views import SmartCRUDL, SmartCreateView, SmartReadView, SmartListView
from phoenix.apps.animals.models import Animal
from phoenix.apps.utils.upload.views import UploadView, UploadListView, UploadDeleteView
from .models import AnimalNote, AnimalDocument
class AnimalDocumentUploadView(UploadView):
model = AnimalDocument
delete_url = 'records.animaldocument_delete'
def get_context_data(self, **kwargs):
context = super(AnimalDocumentUploadView, self).get_context_data(**kwargs)
#context['animal'] = self.request.animal
return context
class AnimalDocumentListView(UploadListView):
model = AnimalDocument
delete_url = 'records.animaldocument_delete'
def get_queryset(self):
return AnimalDocument.objects.all()# filter(animal=self.kwargs['animal_id']).filter(deleted=False)
class AnimalDocumentDeleteView(UploadDeleteView):
model = AnimalDocument
class AnimalNoteCRUDL(SmartCRUDL):
model = AnimalNote
class FormMixin(object):
def __init__(self, **kwargs):
# Prevent cyclic import errors
from .forms import AnimalNoteForm
self.form_class = AnimalNoteForm
super(AnimalNoteCRUDL.FormMixin, self).__init__(**kwargs)
class Create(FormMixin, SmartCreateView):
def get(self, request, *args, **kwargs):
animal_id = request.GET.get('animal', None)
if not animal_id:
messages.warning(request, 'Animal Id is required')
return HttpResponseRedirect(request.META.get('HTTP_REFERER', '/'))
return super(AnimalNoteCRUDL.Create, self).get(request, *args, **kwargs)
def pre_save(self, obj):
animal_id = self.request.GET.get('animal', None)
try:
animal = Animal.objects.get(id=animal_id)
except Animal.DoesNotExist:
messages.error(self.request, 'Animal Id is required')
else:
obj.animal = animal
return obj
def get_success_url(self):
return reverse('animals.animal_read', args=[self.request.GET.get('animal', None)])
class Read(SmartReadView):
fields = ('id', 'date', 'file', 'details', 'created', 'modified')
def get_file(self, obj):
return '<a href=' + obj.file.url + '>' + obj.file.name + '</a>'
class List(SmartListView):
fields = ('id', 'date', 'file', 'details')
def get_file(self, obj):
if obj.file:
return '<a href=' + obj.file.url + '>' + obj.file.name + '</a>'
return ''
def get_queryset(self, **kwargs):
queryset = super(AnimalNoteCRUDL.List, self).get_queryset(**kwargs)
queryset = queryset.filter(animal=self.request.animal)
return queryset
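# Usage sketch (added, not part of the original file): the Create view above
# expects the target animal's id as a query-string parameter, e.g. a request
# to the CRUDL's create URL with "?animal=42" attaches the new note to the
# Animal with id 42; without the parameter the view redirects back with a
# warning message. The exact URL prefix depends on how this SmartCRUDL is
# wired into urls.py, which is outside this file.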
# class AnimalGroupNoteCRUDL(SmartCRUDL):
# model = AnimalGroupNote
#
# class FormMixin(object):
#
# def __init__(self, **kwargs):
# # Prevent cyclic import errors
# from .forms import AnimalGroupNoteForm
# self.form_class = AnimalGroupNoteForm
# super(AnimalGroupNoteCRUDL.FormMixin, self).__init__(**kwargs)
#
# class Create(FormMixin, SmartCreateView):
#
# def get(self, request, *args, **kwargs):
# animalgroup_id = request.GET.get('group', None)
# if not animalgroup_id:
# messages.warning(request, 'Animal Group Id is required')
# return HttpResponseRedirect(request.META.get('HTTP_REFERER', '/'))
# return super(AnimalGroupNoteCRUDL.Create, self).get(request, *args, **kwargs)
#
# def pre_save(self, obj):
# animalgroup_id = self.request.GET.get('group', None)
# try:
# animalgroup = AnimalGroup.objects.get(id=animalgroup_id)
# except AnimalGroup.DoesNotExist:
# messages.error(self.request, 'Animal Id is required')
# else:
# obj.animalgroup = animalgroup
# return obj
#
# def get_success_url(self):
# return reverse('groups.animalgroup_read', args=[self.request.GET.get('group', None)])
#
# class Read(SmartReadView):
# fields = ('id', 'date', 'file', 'details', 'created', 'modified')
#
# def get_file(self, obj):
# if obj.file:
# return '<a href=' + obj.file.url + '>' + obj.file.name + '</a>'
# return ''
#
# class List(SmartListView):
# fields = ('id', 'date', 'file', 'details')
#
# def get_file(self, obj):
# if obj.file:
# return '<a href=' + obj.file.url + '>' + obj.file.name + '</a>'
# return ''
#
# def get_queryset(self, **kwargs):
# queryset = super(AnimalGroupNoteCRUDL.List, self).get_queryset(**kwargs)
# queryset = queryset.filter(animalgroup=self.request.animalgroup)
# return queryset | bsd-3-clause | -1,207,090,966,957,749,500 | 36.510949 | 106 | 0.602374 | false | 3.704398 | false | false | false |
0lidaxiang/WeArt | chapter/view/getChapter.py | 1 | 2117 | #!/usr/bin/python
# -*- coding: UTF-8 -*-
import sys
import json
from django.http import JsonResponse
from django.shortcuts import render
from book.models import book
from chapter.models import chapter
def bookChapter(request):
context = {}
# get the book id of user input if it is not null
if 'idBook' not in request.GET:
context['status'] = "fail"
context['message'] = "The idBook variable is not in request.GET."
return JsonResponse(context)
inputIdBook = request.GET['idBook']
# get the book name of user input if it is not null
# if 'bookName' not in request.GET:
# context['status'] = "fail"
# context['message'] = "The bookName variable is not in request.GET."
# return JsonResponse(context)
# bookName = request.GET['bookName']
bookName = ""
res, status, mes = book.getValue(inputIdBook, "name")
if res:
bookName = mes
else:
print "getchapter bookChapter error" + str(status)
return render(request, 'chapter/bookChapter.html', context={'idBook': inputIdBook,'bookName': bookName})
def getChapter(request):
context = {}
reload(sys)
sys.setdefaultencoding('utf8')
# get the new book name of user input if it is not null
if 'idBook' not in request.GET:
context['status'] = "fail"
context['message'] = "The idBook variable is not in request.GET."
return JsonResponse(context)
inputIdBook = request.GET['idBook']
res, statusNumber, mes = chapter.getAll(inputIdBook)
if not res:
context['status'] = "fail"
context['message'] = "錯誤: " + mes
return JsonResponse(context)
context['status'] = "success"
response_data = []
for m in mes:
response_record = {}
response_record['id'] = m.id
response_record['name'] = m.name
response_record['chapterOrder'] = m.chapterOrder
response_record['book_name'] = book.getValue(m.idBook_id, "name")[2]
response_data.append(response_record)
context["message"] = response_data
return JsonResponse(context)
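# Response shape sketch (added for clarity): on success getChapter() returns
# JSON of the form
#   {"status": "success",
#    "message": [{"id": ..., "name": ..., "chapterOrder": ...,
#                 "book_name": ...}, ...]}
# and {"status": "fail", "message": "..."} when idBook is missing or the
# chapter lookup fails.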
| bsd-3-clause | 3,381,290,190,217,921,000 | 30.044118 | 108 | 0.639981 | false | 3.789946 | false | false | false |
quasiyoke/RandTalkBot | randtalkbot/stats.py | 1 | 1668 | # RandTalkBot Bot matching you with a random person on Telegram.
# Copyright (C) 2016 quasiyoke
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import datetime
import json
import logging
from peewee import DateTimeField, Model, Proxy, TextField
LOGGER = logging.getLogger('randtalkbot.stats')
def _(string):
return string
DATABASE_PROXY = Proxy()
RATIO_MAX = 10
class Stats(Model):
data_json = TextField()
created = DateTimeField(default=datetime.datetime.utcnow, index=True)
class Meta:
database = DATABASE_PROXY
def __init__(self, *args, **kwargs):
super(Stats, self).__init__(*args, **kwargs)
self._data_cache = None
def get_data(self):
if self._data_cache is None:
self._data_cache = json.loads(self.data_json)
return self._data_cache
def set_data(self, data):
self._data_cache = data
self.data_json = json.dumps(data)
def get_sex_ratio(self):
"""https://en.wikipedia.org/wiki/Human_sex_ratio
Returns:
float: Ratio of males over the females.
"""
try:
sex_data = self.get_data()['sex_distribution']
except (KeyError, TypeError):
return 1
males_count = sex_data.get('male', 0)
females_count = sex_data.get('female', 0)
if males_count > 0 and females_count > 0:
return males_count / females_count
elif males_count > 0:
return RATIO_MAX
elif females_count > 0:
return 1 / RATIO_MAX
return 1
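# Worked example (added; purely illustrative, no database access is needed
# because set_data() also fills the in-memory cache used by get_data()):
#
#   stats = Stats()
#   stats.set_data({'sex_distribution': {'male': 6, 'female': 3}})
#   stats.get_sex_ratio()   # -> 2.0 (twice as many males as females)
#   stats.set_data({'sex_distribution': {'male': 5, 'female': 0}})
#   stats.get_sex_ratio()   # -> RATIO_MAX (10), the capped males-only case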
| agpl-3.0 | 3,084,319,754,593,820,700 | 25.47619 | 74 | 0.615707 | false | 3.641921 | false | false | false |
tedlaz/pyted | misthodosia/m13a/f_newCoWizard.py | 1 | 16787 | # -*- coding: utf-8 -*-
'''
Created on 15 Feb 2013
@author: tedlaz
'''
sqlco = u"INSERT INTO m12_co VALUES (1,'{0}','{1}','{2}',{3},'{4}','{5}','{6}','{7}','{8}','{9}','{10}','{11}','{12}','{13}')"
from PyQt4 import QtCore, QtGui,Qt
import utils_db,widgets
import osyk
from utils_qt import fFindFromList
import datetime
class NewDbWizard(QtGui.QWizard):
def __init__(self, parent=None):
super(NewDbWizard, self).__init__(parent)
#self.setAttribute(Qt.Qt.WA_DeleteOnClose) Not used, because it creates a problem ...
#self.addPage(IntroPage())
self.addPage(coDataPage())
self.addPage(coDataPage2())
self.addPage(filePage())
self.addPage(finalPage())
self.setWizardStyle(QtGui.QWizard.ModernStyle)
self.setOption(QtGui.QWizard.IndependentPages,True)
#self.setPixmap(QtGui.QWizard.BannerPixmap,QtGui.QPixmap(':/banner'))
#self.setPixmap(QtGui.QWizard.BackgroundPixmap, QtGui.QPixmap(':/background'))
self.setWindowTitle(u"Οδηγός Δημιουργίας Νέου Αρχείου Μισθοδοσίας")
def accept(self):
#print '%s %s %s' % (self.field('epon'),self.field('cotyp_id'),self.field('fname'))
fileSql = open(osyk.newDbFile)
script = u''
for lines in fileSql:
script += u'%s' % lines.decode('utf-8')
utils_db.executeScript(script, self.field('fname'))
sqlCo = sqlco.format(self.field('epon'),self.field('onom'),self.field('patr'),self.field('cotyp_id'),
self.field('ame'),self.field('afm'),self.field('doy'),self.field('dra'),
self.field('pol'),self.field('odo'),self.field('num'),self.field('tk'),
self.field('ikac'),self.field('ikap'))
print sqlCo
utils_db.commitToDb(sqlCo, self.field('fname'))
sqlCoy = u"INSERT INTO m12_coy VALUES (1,1,'Κεντρικό','%s')" % self.field('kad')
utils_db.commitToDb(sqlCoy, self.field('fname'))
etos = datetime.datetime.now().year
utils_db.commitToDb(u"INSERT INTO m12_xrisi (xrisi,xrisip) VALUES ('{0}','Χρήση {0}')".format(etos), self.field('fname'))
eidList = osyk.eid_cad_listFilteredDouble(self.field('kad'))
#print eidList
sqleid_ = u"INSERT INTO m12_eid (eidp,keid) VALUES ('{0}','{1}');\n"
sqleid = u''
for el in eidList:
sqleid += sqleid_.format(el[1],el[0])
utils_db.executeScript(sqleid,self.field('fname'))
super(NewDbWizard, self).accept()
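# Note (added): the self.field('...') lookups above work because each page
# registers its inputs with QWizardPage.registerField(); a trailing '*' in
# the name (e.g. 'epon*') marks the field as mandatory, so QWizard keeps the
# Next/Finish buttons disabled until it has a value. This is standard QWizard
# behaviour rather than something defined in this file.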
class IntroPage(QtGui.QWizardPage):
def __init__(self, parent=None):
super(IntroPage, self).__init__(parent)
self.setTitle(u"Οδηγίες")
#self.setPixmap(QtGui.QWizard.WatermarkPixmap, QtGui.QPixmap(':/watermark1'))
label = QtGui.QLabel(u"Αυτός ο οδηγός θα δημιουργήσει νέο Αρχείο Μισθοδοσίας.\n\n "
u"Εσείς θα πρέπει απλά να εισάγετε τις απαραίτητες παραμέτρους "
u"καθώς και το όνομα του αρχείου και το σημείο αποθήκευσης.\n\n"
u"Μπορείτε σε κάθε βήμα να αναθεωρήσετε και να επιστρέψετε.\n\n"
u"Πατήστε δημιουργία στην τελευταία οθόνη για να ολοκληρώσετε.")
label.setWordWrap(True)
layout = QtGui.QVBoxLayout()
layout.addWidget(label)
self.setLayout(layout)
class coDataPage(QtGui.QWizardPage):
def __init__(self, parent=None):
super(coDataPage, self).__init__(parent)
#parent.button(QtGui.QWizard.BackButton).setVisible(False)
#self.buttonText(QtGui.QWizard.NextButton)
self.setButtonText(QtGui.QWizard.BackButton,u'< Πίσω')
self.setButtonText(QtGui.QWizard.NextButton,u'Επόμενο >')
self.setButtonText(QtGui.QWizard.CancelButton,u'Ακύρωση')
self.setTitle(u"Πληροφορίες εταιρίας")
self.setSubTitle(u"Συμπληρώστε τα βασικά στοιχεία της εταιρίας")
#self.setPixmap(QtGui.QWizard.LogoPixmap, QtGui.QPixmap(':/logo1'))
cotypLabel = QtGui.QLabel(u"Τύπος επιχείρησης:")
cotyp = widgets.DbComboBox([[1,u'Νομικό Πρόσωπο'],[2,u'Φυσικό Πρόσωπο']])
cotypLabel.setBuddy(cotyp)
eponNameLabel = QtGui.QLabel(u"Επωνυμία:")
eponNameLineEdit = QtGui.QLineEdit()
eponNameLabel.setBuddy(eponNameLineEdit)
onomLabel = QtGui.QLabel(u"Όνομα (Για φυσικά πρόσωπα):")
onomLineEdit = QtGui.QLineEdit()
onomLineEdit.setDisabled(True)
onomLabel.setBuddy(onomLineEdit)
patrLabel = QtGui.QLabel(u"Πατρώνυμο (Για φυσικά πρόσωπα):")
patrLineEdit = QtGui.QLineEdit()
patrLineEdit.setDisabled(True)
patrLabel.setBuddy(patrLineEdit)
cotypValue = QtGui.QLineEdit()
cotypValue.setText('1')
def onCotypActivated():
if cotyp.currentIndex() ==1:
onomLineEdit.setDisabled(False)
patrLineEdit.setDisabled(False)
cotypValue.setText('2')
else:
onomLineEdit.setText('')
patrLineEdit.setText('')
onomLineEdit.setDisabled(True)
patrLineEdit.setDisabled(True)
cotypValue.setText('1')
cotyp.activated.connect(onCotypActivated)
kadLabel = QtGui.QLabel(u"Κωδικός αρ.Δραστηριότητας:")
kadLineEdit = QtGui.QLineEdit()
kadLabel.setBuddy(kadLineEdit)
kadLineEdit.setReadOnly(True)
kadFindButton = QtGui.QPushButton(u'Εύρεση ΚΑΔ')
kadLayout = QtGui.QHBoxLayout()
kadLayout.addWidget(kadLineEdit)
kadLayout.addWidget(kadFindButton)
kadpLabel = QtGui.QLabel(u"Περιγραφή αρ.Δραστηριότητας:")
kadpTextEdit = QtGui.QTextEdit()
kadpLabel.setBuddy(kadpTextEdit)
kadpTextEdit.setReadOnly(True)
draLabel = QtGui.QLabel(u"Συντομογραφία Δραστηριότητας:")
draLineEdit = QtGui.QLineEdit()
draLabel.setBuddy(draLineEdit)
def openFindDlg():
kadList = osyk.cad_list()
head = [u'ΚΑΔ',u'Περιγραφή']
cw = [35,300]
form = fFindFromList(kadList,head,cw)
if form.exec_() == QtGui.QDialog.Accepted:
kadLineEdit.setText(form.array[0])
kadpTextEdit.setText(form.array[1])
kadFindButton.clicked.connect(openFindDlg)
self.registerField('cotyp_id',cotypValue)
self.registerField('epon*', eponNameLineEdit)
self.registerField('onom', onomLineEdit)
self.registerField('patr', patrLineEdit)
self.registerField('kad*', kadLineEdit)
self.registerField('dra*', draLineEdit)
#self.registerField('kadt*', kadpTextEdit)
layout = QtGui.QGridLayout()
layout.addWidget(cotypLabel, 0, 0)
layout.addWidget(cotyp, 0, 1)
layout.addWidget(eponNameLabel, 1, 0)
layout.addWidget(eponNameLineEdit, 1, 1)
layout.addWidget(onomLabel, 2, 0)
layout.addWidget(onomLineEdit, 2, 1)
layout.addWidget(patrLabel, 3, 0)
layout.addWidget(patrLineEdit, 3, 1)
layout.addWidget(kadLabel, 4, 0)
layout.addLayout(kadLayout, 4, 1)
layout.addWidget(kadpLabel,5, 0)
layout.addWidget(kadpTextEdit, 5, 1,2,1)
layout.addWidget(draLabel,7, 0)
layout.addWidget(draLineEdit,7, 1)
self.setLayout(layout)
class coDataPage2(QtGui.QWizardPage):
def __init__(self, parent=None):
super(coDataPage2, self).__init__(parent)
self.setButtonText(QtGui.QWizard.BackButton,u'< Πίσω')
self.setButtonText(QtGui.QWizard.NextButton,u'Επόμενο >')
self.setButtonText(QtGui.QWizard.CancelButton,u'Ακύρωση')
self.setTitle(u"Πληροφορίες εταιρίας")
self.setSubTitle(u"Συμπληρώστε τα υπόλοιπα στοιχεία της εταιρίας")
afmLabel = QtGui.QLabel(u"ΑΦΜ:")
afmLineEdit = QtGui.QLineEdit()
afmLabel.setBuddy(afmLineEdit)
doyLabel = QtGui.QLabel(u"ΔΟΥ:")
doyLineEdit = QtGui.QLineEdit()
doyLabel.setBuddy(doyLineEdit)
doyLineEdit.setReadOnly(True)
doyFindButton = QtGui.QPushButton(u'...')
doyFindButton.setMaximumSize(QtCore.QSize(20, 50))
doyLayout = QtGui.QHBoxLayout()
doyLayout.addWidget(doyLineEdit)
doyLayout.addWidget(doyFindButton)
def openFindDlg():
head = [u'Κωδ',u'ΔΟΥ']
cw = [35,300]
form = fFindFromList(osyk.doy_list(),head,cw)
if form.exec_() == QtGui.QDialog.Accepted:
doyLineEdit.setText(form.array[1])
doyFindButton.clicked.connect(openFindDlg)
poliLabel = QtGui.QLabel(u"Πόλη:")
poliLineEdit = QtGui.QLineEdit()
poliLabel.setBuddy(poliLineEdit)
tkLabel = QtGui.QLabel(u"Ταχ.Κωδικός:")
tkLineEdit = QtGui.QLineEdit()
tkLabel.setBuddy(tkLineEdit)
odosLabel = QtGui.QLabel(u"Οδός:")
odosLineEdit = QtGui.QLineEdit()
odosLabel.setBuddy(odosLineEdit)
numLabel = QtGui.QLabel(u"Αριθμός:")
numLineEdit = QtGui.QLineEdit()
numLabel.setBuddy(numLineEdit)
ameLabel = QtGui.QLabel(u"Αρ.Μητρ.ΙΚΑ:")
ameLineEdit = QtGui.QLineEdit()
ameLabel.setBuddy(ameLineEdit)
ikacLabel = QtGui.QLabel(u"Κωδ.ΙΚΑ:")
ikacLineEdit = QtGui.QLineEdit()
ikacLabel.setBuddy(ikacLineEdit)
ikacLineEdit.setReadOnly(True)
ikaLabel = QtGui.QLabel(u"Υπ/μα.ΙΚΑ:")
ikaLineEdit = QtGui.QLineEdit()
ikaLabel.setBuddy(ikaLineEdit)
ikaLineEdit.setReadOnly(True)
ikaFindButton = QtGui.QPushButton(u'...')
ikaFindButton.setMaximumSize(QtCore.QSize(20, 50))
ikaLayout = QtGui.QHBoxLayout()
ikaLayout.addWidget(ikaLineEdit)
ikaLayout.addWidget(ikaFindButton)
def openFindDlgIKA():
head = [u'Κωδ',u'Υποκατάστημα ΙΚΑ']
cw = [35,300]
form = fFindFromList(osyk.ika_list(),head,cw)
if form.exec_() == QtGui.QDialog.Accepted:
ikacLineEdit.setText(form.array[0])
ikaLineEdit.setText(form.array[1])
ikaFindButton.clicked.connect(openFindDlgIKA)
self.registerField('afm*',afmLineEdit)
self.registerField('doy*',doyLineEdit)
self.registerField('pol*',poliLineEdit)
self.registerField('odo',odosLineEdit)
self.registerField('num',numLineEdit)
self.registerField('tk',tkLineEdit)
self.registerField('ikac*',ikacLineEdit)
self.registerField('ikap*',ikaLineEdit)
self.registerField('ame*',ameLineEdit)
layout = QtGui.QGridLayout()
layout.addWidget(afmLabel, 0, 0)
layout.addWidget(afmLineEdit, 0, 1)
layout.addWidget(doyLabel, 0, 2)
layout.addLayout(doyLayout, 0, 3)
layout.addWidget(poliLabel, 1, 0)
layout.addWidget(poliLineEdit, 1, 1)
layout.addWidget(tkLabel, 1, 2)
layout.addWidget(tkLineEdit, 1, 3)
layout.addWidget(odosLabel, 2, 0)
layout.addWidget(odosLineEdit, 2, 1)
layout.addWidget(numLabel, 2, 2)
layout.addWidget(numLineEdit, 2, 3)
layout.addWidget(ameLabel, 3, 0)
layout.addWidget(ameLineEdit, 3, 1)
layout.addWidget(ikacLabel, 4, 0)
layout.addWidget(ikacLineEdit, 4, 1)
layout.addWidget(ikaLabel, 4, 2)
layout.addLayout(ikaLayout, 4, 3)
self.setLayout(layout)
class filePage(QtGui.QWizardPage):
def __init__(self, parent=None):
super(filePage, self).__init__(parent)
self.setButtonText(QtGui.QWizard.BackButton,u'< Πίσω')
self.setButtonText(QtGui.QWizard.NextButton,u'Επόμενο >')
self.setButtonText(QtGui.QWizard.CancelButton,u'Ακύρωση')
self.setTitle(u"Όνομα αρχείου")
self.setSubTitle(u"Δώστε όνομα και περιοχή αποθήκευσης")
#self.setPixmap(QtGui.QWizard.LogoPixmap, QtGui.QPixmap(':/logo1'))
fileNameLabel = QtGui.QLabel(u"Όνομα αρχείου:")
self.fileNameLineEdit = QtGui.QLineEdit()
self.fileNameLineEdit.setReadOnly(True)
fileNameLabel.setBuddy(self.fileNameLineEdit)
butFile = QtGui.QPushButton(u'...')
butFile.clicked.connect(self.fSave)
fileLayout = QtGui.QHBoxLayout()
fileLayout.addWidget(self.fileNameLineEdit)
fileLayout.addWidget(butFile)
patrLabel = QtGui.QLabel(u"Πατρώνυμο (Για φυσικά πρόσωπα):")
patrLineEdit = QtGui.QLineEdit()
patrLabel.setBuddy(patrLineEdit)
cotypLabel = QtGui.QLabel(u"Τύπος επιχείρησης:")
cotyp = QtGui.QComboBox()
cotypLabel.setBuddy(cotyp)
cotyp.addItems([u'1.Νομικό Πρόσωπο',u'2.Φυσικό Πρόσωπο'])
self.registerField('fname*', self.fileNameLineEdit)
layout = QtGui.QGridLayout()
layout.addWidget(fileNameLabel, 0, 0)
layout.addLayout(fileLayout, 0, 1)
self.setLayout(layout)
def fSave(self):
fileName = QtGui.QFileDialog.getSaveFileName(self,
"QFileDialog.getSaveFileName()",
self.field('fname'),
"payroll m13 (*.m13)", QtGui.QFileDialog.Options())
if fileName:
self.fileNameLineEdit.setText(fileName)
class finalPage(QtGui.QWizardPage):
def __init__(self, parent=None):
super(finalPage, self).__init__(parent)
self.setButtonText(QtGui.QWizard.BackButton,u'< Πίσω')
self.setButtonText(QtGui.QWizard.FinishButton,u'Ολοκλήρωση')
self.setButtonText(QtGui.QWizard.CancelButton,u'Ακύρωση')
self.setTitle(u"Δημιουργία αρχείου ")
#self.setPixmap(QtGui.QWizard.WatermarkPixmap, QtGui.QPixmap(':/watermark2'))
self.label = QtGui.QLabel()
self.label.setWordWrap(True)
layout = QtGui.QVBoxLayout()
layout.addWidget(self.label)
self.setLayout(layout)
def initializePage(self):
finishText = self.wizard().buttonText(QtGui.QWizard.FinishButton)
finishText.replace('&', '')
txt = u'Προσοχή , θα δημιουργηθεί αρχείο μισθοδοσίας με τις παρακάτω παραμέτρους :\n\n'
txt += u'Στοιχεία Επιχείρησης : %s \n\n' % self.field('epon')
txt += u'Όνομα Αρχείου : %s \n\n' % self.field('fname')
txt += u"\nΠατήστε %s για να ολοκληρωθεί η διαδικασία." % finishText
txt += u"\n\nΜε την ολοκλήρωση της διαδικασίας το νέο αρχείο είναι έτοιμο για εισαγωγή δεδομένων!!!"
self.label.setText(txt)
if __name__ == '__main__':
import sys
app = QtGui.QApplication(sys.argv)
wizard = NewDbWizard()
wizard.show()
sys.exit(app.exec_())
| gpl-3.0 | 6,554,847,959,938,570,000 | 37.832061 | 129 | 0.590009 | false | 2.875459 | false | false | false |
denex/snafucator | Python/pin/pin_generator.py | 1 | 1028 | def _create_pins():
"""
:rtype: Iterable[int]
"""
middle = 5000
for i in range(0, 2 * middle):
if i % 2 == 0:
yield middle - i // 2 - 1
else:
yield middle + i // 2
PINS = tuple(_create_pins())
assert len(PINS) == 10000, "Len = %d" % len(PINS)
assert min(PINS) == 0000
assert max(PINS) == 9999
def get_pin_index(pin):
return 0 if pin is None else PINS.index(pin)
def pin_generator(last_pin=None):
"""
:type last_pin: int or None
:rtype: Iterable[int]
"""
start_pos = get_pin_index(last_pin) + 1 if last_pin is not None else 0
for i in range(start_pos, len(PINS)):
yield PINS[i]
def test_selector():
print(get_pin_index(6000))
l1 = list(pin_generator(last_pin=9997))
assert len(frozenset(l1)) == 4
l2 = list(pin_generator(last_pin=4999))
assert len(frozenset(l2)) == 9999
l3 = list(pin_generator(last_pin=5000))
assert len(frozenset(l3)) == 9998
if __name__ == '__main__':
test_selector()
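# Illustrative sketch (added): PINS interleaves values outward from 5000, so
# iteration starts 4999, 5000, 4998, 5001, 4997, 5002, ... and
# pin_generator(last_pin) resumes right after last_pin, e.g.:
#
#   gen = pin_generator(last_pin=4999)   # 4999 is PINS[0]
#   next(gen)                            # -> 5000 (PINS[1])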
| gpl-3.0 | 7,676,437,677,112,655,000 | 20.87234 | 74 | 0.571984 | false | 3.005848 | false | false | false |
nodesign/weioMinima | weioLib/weioParser.py | 1 | 10962 | ###
#
# WEIO Web Of Things Platform
# Copyright (C) 2013 Nodesign.net, Uros PETREVSKI, Drasko DRASKOVIC
# All rights reserved
#
# ## ## ######## #### #######
# ## ## ## ## ## ## ##
# ## ## ## ## ## ## ##
# ## ## ## ###### ## ## ##
# ## ## ## ## ## ## ##
# ## ## ## ## ## ## ##
# ### ### ######## #### #######
#
# Web Of Things Platform
#
# This file is part of WEIO and is published under BSD license.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# 3. All advertising materials mentioning features or use of this software
# must display the following acknowledgement:
# This product includes software developed by the WeIO project.
# 4. Neither the name of the WeIO nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY WEIO PROJECT AUTHORS AND CONTRIBUTORS ''AS IS'' AND ANY
# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL WEIO PROJECT AUTHORS AND CONTRIBUTORS BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Authors :
# Uros PETREVSKI <uros@nodesign.net>
# Drasko DRASKOVIC <drasko.draskovic@gmail.com>
#
###
from weioLib.weioIO import *
from weioUserApi import serverPush
from weioLib import weioRunnerGlobals
import platform, sys
# WeIO API bindings from websocket to lower levels
# Each data argument is array of data
# Return value is dictionary
def callPinMode(data) :
if (weioRunnerGlobals.WEIO_SERIAL_LINKED is True):
pinMode(data[0],data[1])
else :
print "pinMode ON PC", data
return None
def callPortMode(data) :
if (weioRunnerGlobals.WEIO_SERIAL_LINKED is True):
portMode(data[0],data[1])
else :
print "pinMode ON PC", data
return None
def callDigitalWrite(data) :
if (weioRunnerGlobals.WEIO_SERIAL_LINKED is True):
digitalWrite(data[0], data[1])
else :
print "digitalWrite ON PC", data
return None
def callDigitalRead(data) :
bck = {}
if (weioRunnerGlobals.WEIO_SERIAL_LINKED is True):
value = digitalRead(data[0])
bck["data"] = value
bck["pin"] = data[0]
else :
print "digitalRead ON PC", data
bck["data"] = 1 # faked value
bck["pin"] = data[0] # pin
return bck
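# Handler contract example (added; values are illustrative): the websocket
# layer calls these bindings with a plain list of arguments and relays the
# returned dict back to the browser, e.g.
#
#   callDigitalRead([4])   # -> {"data": <0 or 1>, "pin": 4}
#
# Bindings that only trigger an action (callPinMode, callDigitalWrite, ...)
# return None instead.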
def callPulseIn(data) :
bck = {}
if (weioRunnerGlobals.WEIO_SERIAL_LINKED is True):
value = pulseIn(data[0], data[1], data[2])
bck["data"] = value
bck["pin"] = data[0]
bck["level"] = data[1]
bck["timeout"] = data[1]
else :
print "pulseIn ON PC", data
bck["data"] = 1 # faked value
bck["pin"] = data[0] # pin
bck["level"] = data[1] # level
bck["timeout"] = data[2] # timeout
return bck
def callPortWrite(data) :
if (weioRunnerGlobals.WEIO_SERIAL_LINKED is True):
portWrite(data[0], data[1])
else :
print "portWrite ON PC", data
return None
def callPortRead(data) :
bck = {}
if (weioRunnerGlobals.WEIO_SERIAL_LINKED is True):
value = portRead(data[0])
bck["data"] = value
bck["port"] = data[0]
else :
print "digitalRead ON PC", data
bck["data"] = 1 # faked value
bck["port"] = data[0] # pin
return bck
def callDHTRead(data) :
if (weioRunnerGlobals.WEIO_SERIAL_LINKED is True):
dhtRead(data[0])
else :
print "dhtRead ON PC", data
return None
def callAnalogRead(data) :
bck = {}
if (weioRunnerGlobals.WEIO_SERIAL_LINKED is True):
#print "From browser ", data
value = analogRead(data[0]) # this is pin number
bck["data"] = value
bck["pin"] = data[0]
else :
print "analogRead ON PC", data
bck["data"] = 1023 # faked value
bck["pin"] = data[0]
return bck
def callSetPwmPeriod(data) :
if (weioRunnerGlobals.WEIO_SERIAL_LINKED is True):
setPwmPeriod(data[0],data[1])
else:
print "setPwmPeriod ON PC", data
return None
# def callSetPwmLimit(data) :
# if (weioRunnerGlobals.WEIO_SERIAL_LINKED is True):
# setPwmLimit(data[0])
# else:
# print "setPwmLimit ON PC", data
# return None
def callPwmWrite(data) :
if (weioRunnerGlobals.WEIO_SERIAL_LINKED is True):
pwmWrite(data[0], data[1])
else :
print "pwmWrite ON PC", data
return None
def callProportion(data) :
bck = {}
if (weioRunnerGlobals.WEIO_SERIAL_LINKED is True):
#print "From browser ", data
value = proportion(data[0],data[1],data[2],data[3],data[4])
bck["data"] = value
else :
print "proportion ON PC", data
bck["data"] = data
return bck
def callAttachInterrupt(data) :
if (weioRunnerGlobals.WEIO_SERIAL_LINKED is True):
iObj = {"pin" : data[0], "jsCallbackString" : data[2]}
attachInterrupt(data[0], data[1], genericInterrupt, iObj)
else:
print "attachInterrupt ON PC", data
return None
def callDetachInterrupt(data) :
if (weioRunnerGlobals.WEIO_SERIAL_LINKED is True):
detachInterrupt(data[0])
else:
print "detachInterrupt ON PC", data
return None
def genericInterrupt(event, obj):
bck = {}
bck["data"] = obj["pin"]
bck["eventType"] = getInterruptType(event["type"])
serverPush(obj["jsCallbackString"], bck)
def callDelay(data) :
if (weioRunnerGlobals.WEIO_SERIAL_LINKED is True):
delay(data[0])
else :
print "delay ON PC", data
return None
def callTone(data) :
if (weioRunnerGlobals.WEIO_SERIAL_LINKED is True):
print "TONE VALS", len(data)
if (len(data)==2):
tone(data[0], data[1])
elif (len(data)==3):
tone(data[0], data[1], data[2])
else :
print "tone ON PC", data
return None
def callNotone(data) :
if (weioRunnerGlobals.WEIO_SERIAL_LINKED is True):
noTone(data[0])
else :
print "notone ON PC", data
return None
def callConstrain(data) :
    bck = {}
    if (weioRunnerGlobals.WEIO_SERIAL_LINKED is True):
        value = constrain(data[0], data[1], data[2])
        bck["data"] = value
    else :
        print "constrain ON PC", data
        bck["data"] = 1 # faked value
        bck["pin"] = data[0] # pin
    return bck
def callMillis(data) :
bck = {}
if (weioRunnerGlobals.WEIO_SERIAL_LINKED is True):
value = millis()
bck["data"] = value
else :
print "millis ON PC", data
bck["data"] = 0 # faked value
return bck
def callGetTemperature(data):
bck = {}
if (weioRunnerGlobals.WEIO_SERIAL_LINKED is True):
value = getTemperature()
bck["data"] = value
else :
print "getTemperature ON PC", data
bck["data"] = 0 # faked value
return bck
def callUserMesage(data):
print "USER TALKS", data
#weioRunnerGlobals.userMain
def pinsInfo(data) :
bck = {}
bck["data"] = weioRunnerGlobals.DECLARED_PINS
#print("GET PIN INFO ASKED!", bck["data"])
return bck
def callListSerials(data):
bck = {}
bck["data"] = listSerials()
return bck
# UART SECTION
clientSerial = None
def callInitSerial(data):
global clientSerial
if (clientSerial is None) :
clientSerial = initSerial(data[0], data[1])
def callSerialWrite(data):
global clientSerial
if not(clientSerial is None) :
clientSerial.write(data)
else :
sys.stderr.write("Serial port is not initialized. Use initSerial function first")
def callSerialRead(data):
global clientSerial
bck = {}
if not(clientSerial is None) :
bck["data"] = clientSerial.read()
else :
sys.stderr.write("Serial port is not initialized. Use initSerial function first")
return bck
# SPI SECTION
SPI = None
def callInitSPI(data):
global SPI
if (SPI is None) :
SPI = initSPI(data[0])
def callWriteSPI(data):
global SPI
if not(SPI is None) :
SPI.write(data[0])
else :
sys.stderr.write("SPI port is not initialized. Use initSerial function first")
def callReadSPI(data):
global SPI
bck = {}
if not(SPI is None) :
bck["data"] = SPI.read(data[0])
else :
sys.stderr.write("SPI port is not initialized. Use initSerial function first")
return bck
###
# WeIO native spells
###
weioSpells = {
"digitalWrite" :callDigitalWrite,
"digitalRead" :callDigitalRead,
"pulseIn" :callPulseIn,
"portWrite" :callPortWrite,
"portRead" :callPortRead,
"dhtRead" :callDHTRead,
"analogRead" :callAnalogRead,
"pinMode" :callPinMode,
"portMode" :callPortMode,
"setPwmPeriod" :callSetPwmPeriod,
"pwmWrite" :callPwmWrite,
"proportion" :callProportion,
"attachInterrupt" :callAttachInterrupt,
"detachInterrupt" :callDetachInterrupt,
"tone" :callTone,
"noTone" :callNotone,
"constrain" :callConstrain,
"millis" :callMillis,
"getTemperature" :callGetTemperature,
"delay" :callDelay,
"pinsInfo" :pinsInfo,
"listSerials" :callListSerials,
"initSerial" :callInitSerial,
"serialWrite" :callSerialWrite,
"initSPI" :callInitSPI,
"readSPI" :callReadSPI,
"writeSPI" :callWriteSPI
# "message":callUserMesage
}
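###
# Illustrative dispatch (assumption, not part of the original file): the server
# resolves a spell name received over the websocket to its handler and calls it
# with the data array, getting back a dictionary (or None).
#   handler = weioSpells["digitalRead"]
#   result = handler([13])   # e.g. {"data": 0 or 1, "pin": 13}
###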
###
# User added spells (handlers)
###
weioUserSpells = {}
def addUserEvent(event, handler):
global weioUserSpells
#print "Adding event ", event
#print "and handler ", handler
weioUserSpells[event] = handler
def removeUserEvents():
global weioUserSpells
weioUserSpells.clear()
| bsd-3-clause | -1,069,625,149,442,586,900 | 29.032877 | 89 | 0.614213 | false | 3.386469 | false | false | false |
wahaha02/myblog | blog/templatetags/highlight.py | 1 | 1181 | #!/usr/bin/env python
# -*- coding: UTF-8 -*-
from django.template import Library
import re
DEBUG = False
register = Library()
@register.filter
def highlight_format(value):
p_sub = re.compile('__codestart__ (\w+)')
value = p_sub.sub(r'<pre name="code" class="\g<1>">', value)
p_sub = re.compile(r'__codeend__', re.VERBOSE)
value = p_sub.sub(r'</pre>', value)
if DEBUG:
print value
print '+' * 80
p_highlight = re.compile(r'(<pre name="code" class="\w+">)(?P<codeblock>.*)(</pre>)', re.S)
f_list = p_highlight.findall(value)
if f_list:
s_list = p_highlight.split(value)
if DEBUG:
for i in s_list:
print i
print '=' * 80
for code_block in p_highlight.finditer(value):
code = code_block.group('codeblock')
index = s_list.index(code)
            code = code.replace('&lt;', '<')
            code = code.replace('&gt;', '>')
            code = code.replace('&amp;', '&')
code = code.replace('<p>', '')
code = code.replace('</p>', '')
s_list[index] = code
value = ''.join(s_list)
return value
| bsd-3-clause | 7,682,443,373,309,620,000 | 28.525 | 95 | 0.515665 | false | 3.473529 | false | false | false |
OmkarPathak/Python-Programs | CompetitiveProgramming/HackerEarth/Algorithms/String/P11_CaesarsCipher.py | 1 | 2255 | # Caesar's Cipher is a very famous encryption technique used in cryptography. It is a type of substitution
# cipher in which each letter in the plaintext is replaced by a letter some fixed number of positions down
# the alphabet. For example, with a shift of 3, D would be replaced by G, E would become H, X would become A
# and so on.
#
# Encryption of a letter X by a shift K can be described mathematically as
# EK(X)=(X+K) % 26.
#
# Given a plaintext and its corresponding ciphertext, output the minimum non-negative value of shift that was
# used to encrypt the plaintext or else output −1 if it is not possible to obtain the given ciphertext from
# the given plaintext using Caesar's Cipher technique.
#
# Input:
#
# The first line of the input contains Q, denoting the number of queries.
#
# The next Q lines contain two strings S and T consisting of only upper-case letters.
#
# Output:
#
# For each test-case, output a single non-negative integer denoting the minimum value of shift that was used
# to encrypt the plaintext or else print −1 if the answer doesn't exist.
#
# Constraints:
# 1≤Q≤5
# 1≤|S|≤10^5
# 1≤|T|≤10^5
# |S| = |T|
#
# SAMPLE INPUT
# 2
# ABC
# DEF
# AAA
# PQR
#
# SAMPLE OUTPUT
# 3
# -1
# My Solution
for _ in range(int(input())):
string_one = input()
string_two= input()
check_one = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ'
# ZYXWVUTSRQPONMLKJIHGFEDCBA
check_two = check_one[::-1]
result = []
for i in range(len(string_one)):
if(check_one.find(string_one[i]) > check_one.find(string_two[i])):
result.append(check_two.find(string_one[i]) + check_one.find(string_two[i]) + 1)
else:
result.append(check_one.find(string_two[i]) - check_one.find(string_one[i]))
if result.count(result[0]) == len(string_one):
print(result[0])
else:
print(-1)
# More Efficient Solution:
tests = int(input().strip())
for i in range(tests):
plain = input().strip()
cipher = input().strip()
shift = (ord(cipher[0])-ord(plain[0])+26)%26
valid = True
for j in range(len(plain)):
if (ord(cipher[j])-ord(plain[j])+26)%26 != shift:
valid = False
break
print(shift) if valid else print("-1")
| gpl-3.0 | -6,465,062,925,664,911,000 | 30.535211 | 110 | 0.659669 | false | 3.203147 | false | false | false |
lmprice/ansible | lib/ansible/playbook/role/__init__.py | 13 | 18914 | # (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import collections
import os
from ansible.errors import AnsibleError, AnsibleParserError, AnsibleAssertionError
from ansible.module_utils.six import iteritems, binary_type, text_type
from ansible.playbook.attribute import FieldAttribute
from ansible.playbook.base import Base
from ansible.playbook.become import Become
from ansible.playbook.conditional import Conditional
from ansible.playbook.helpers import load_list_of_blocks
from ansible.playbook.role.metadata import RoleMetadata
from ansible.playbook.taggable import Taggable
from ansible.plugins.loader import get_all_plugin_loaders
from ansible.utils.vars import combine_vars
__all__ = ['Role', 'hash_params']
# TODO: this should be a utility function, but can't be a member of
# the role due to the fact that it would require the use of self
# in a static method. This is also used in the base class for
# strategies (ansible/plugins/strategy/__init__.py)
def hash_params(params):
"""
Construct a data structure of parameters that is hashable.
This requires changing any mutable data structures into immutable ones.
We chose a frozenset because role parameters have to be unique.
.. warning:: this does not handle unhashable scalars. Two things
mitigate that limitation:
1) There shouldn't be any unhashable scalars specified in the yaml
2) Our only choice would be to return an error anyway.
"""
# Any container is unhashable if it contains unhashable items (for
# instance, tuple() is a Hashable subclass but if it contains a dict, it
# cannot be hashed)
if isinstance(params, collections.Container) and not isinstance(params, (text_type, binary_type)):
if isinstance(params, collections.Mapping):
try:
# Optimistically hope the contents are all hashable
new_params = frozenset(params.items())
except TypeError:
new_params = set()
for k, v in params.items():
# Hash each entry individually
new_params.update((k, hash_params(v)))
new_params = frozenset(new_params)
elif isinstance(params, (collections.Set, collections.Sequence)):
try:
# Optimistically hope the contents are all hashable
new_params = frozenset(params)
except TypeError:
new_params = set()
for v in params:
# Hash each entry individually
new_params.update(hash_params(v))
new_params = frozenset(new_params)
else:
# This is just a guess.
new_params = frozenset(params)
return new_params
# Note: We do not handle unhashable scalars but our only choice would be
# to raise an error there anyway.
return frozenset((params,))
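# Illustrative example (not part of the original module): equal role parameters
# always reduce to the same frozenset, so they can safely be used as keys into
# play.ROLE_CACHE.
#   key_a = hash_params({'port': 80, 'tags': ['web']})
#   key_b = hash_params({'port': 80, 'tags': ['web']})
#   assert key_a == key_b and hash(key_a) == hash(key_b)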
class Role(Base, Become, Conditional, Taggable):
_delegate_to = FieldAttribute(isa='string')
_delegate_facts = FieldAttribute(isa='bool', default=False)
def __init__(self, play=None, from_files=None):
self._role_name = None
self._role_path = None
self._role_params = dict()
self._loader = None
self._metadata = None
self._play = play
self._parents = []
self._dependencies = []
self._task_blocks = []
self._handler_blocks = []
self._default_vars = dict()
self._role_vars = dict()
self._had_task_run = dict()
self._completed = dict()
if from_files is None:
from_files = {}
self._from_files = from_files
super(Role, self).__init__()
def __repr__(self):
return self.get_name()
def get_name(self):
return self._role_name
@staticmethod
def load(role_include, play, parent_role=None, from_files=None):
if from_files is None:
from_files = {}
try:
# The ROLE_CACHE is a dictionary of role names, with each entry
# containing another dictionary corresponding to a set of parameters
# specified for a role as the key and the Role() object itself.
# We use frozenset to make the dictionary hashable.
params = role_include.get_role_params()
if role_include.when is not None:
params['when'] = role_include.when
if role_include.tags is not None:
params['tags'] = role_include.tags
if from_files is not None:
params['from_files'] = from_files
if role_include.vars:
params['vars'] = role_include.vars
hashed_params = hash_params(params)
if role_include.role in play.ROLE_CACHE:
for (entry, role_obj) in iteritems(play.ROLE_CACHE[role_include.role]):
if hashed_params == entry:
if parent_role:
role_obj.add_parent(parent_role)
return role_obj
r = Role(play=play, from_files=from_files)
r._load_role_data(role_include, parent_role=parent_role)
if role_include.role not in play.ROLE_CACHE:
play.ROLE_CACHE[role_include.role] = dict()
play.ROLE_CACHE[role_include.role][hashed_params] = r
return r
except RuntimeError:
raise AnsibleError("A recursion loop was detected with the roles specified. Make sure child roles do not have dependencies on parent roles",
obj=role_include._ds)
def _load_role_data(self, role_include, parent_role=None):
self._role_name = role_include.role
self._role_path = role_include.get_role_path()
self._role_params = role_include.get_role_params()
self._variable_manager = role_include.get_variable_manager()
self._loader = role_include.get_loader()
if parent_role:
self.add_parent(parent_role)
# copy over all field attributes, except for when and tags, which
# are special cases and need to preserve pre-existing values
for (attr_name, _) in iteritems(self._valid_attrs):
if attr_name not in ('when', 'tags'):
setattr(self, attr_name, getattr(role_include, attr_name))
current_when = getattr(self, 'when')[:]
current_when.extend(role_include.when)
setattr(self, 'when', current_when)
current_tags = getattr(self, 'tags')[:]
current_tags.extend(role_include.tags)
setattr(self, 'tags', current_tags)
# dynamically load any plugins from the role directory
for name, obj in get_all_plugin_loaders():
if obj.subdir:
plugin_path = os.path.join(self._role_path, obj.subdir)
if os.path.isdir(plugin_path):
obj.add_directory(plugin_path)
# load the role's other files, if they exist
metadata = self._load_role_yaml('meta')
if metadata:
self._metadata = RoleMetadata.load(metadata, owner=self, variable_manager=self._variable_manager, loader=self._loader)
self._dependencies = self._load_dependencies()
else:
self._metadata = RoleMetadata()
task_data = self._load_role_yaml('tasks', main=self._from_files.get('tasks'))
if task_data:
try:
self._task_blocks = load_list_of_blocks(task_data, play=self._play, role=self, loader=self._loader, variable_manager=self._variable_manager)
except AssertionError as e:
raise AnsibleParserError("The tasks/main.yml file for role '%s' must contain a list of tasks" % self._role_name,
obj=task_data, orig_exc=e)
handler_data = self._load_role_yaml('handlers')
if handler_data:
try:
self._handler_blocks = load_list_of_blocks(handler_data, play=self._play, role=self, use_handlers=True, loader=self._loader,
variable_manager=self._variable_manager)
except AssertionError as e:
raise AnsibleParserError("The handlers/main.yml file for role '%s' must contain a list of tasks" % self._role_name,
obj=handler_data, orig_exc=e)
# vars and default vars are regular dictionaries
self._role_vars = self._load_role_yaml('vars', main=self._from_files.get('vars'), allow_dir=True)
if self._role_vars is None:
self._role_vars = dict()
elif not isinstance(self._role_vars, dict):
raise AnsibleParserError("The vars/main.yml file for role '%s' must contain a dictionary of variables" % self._role_name)
self._default_vars = self._load_role_yaml('defaults', main=self._from_files.get('defaults'), allow_dir=True)
if self._default_vars is None:
self._default_vars = dict()
elif not isinstance(self._default_vars, dict):
raise AnsibleParserError("The defaults/main.yml file for role '%s' must contain a dictionary of variables" % self._role_name)
def _load_role_yaml(self, subdir, main=None, allow_dir=False):
file_path = os.path.join(self._role_path, subdir)
if self._loader.path_exists(file_path) and self._loader.is_directory(file_path):
# Valid extensions and ordering for roles is hard-coded to maintain
# role portability
extensions = ['.yml', '.yaml', '.json']
# If no <main> is specified by the user, look for files with
# extensions before bare name. Otherwise, look for bare name first.
if main is None:
_main = 'main'
extensions.append('')
else:
_main = main
extensions.insert(0, '')
found_files = self._loader.find_vars_files(file_path, _main, extensions, allow_dir)
if found_files:
data = {}
for found in found_files:
new_data = self._loader.load_from_file(found)
if new_data and allow_dir:
data = combine_vars(data, new_data)
else:
data = new_data
return data
elif main is not None:
raise AnsibleParserError("Could not find specified file in role: %s/%s" % (subdir, main))
return None
def _load_dependencies(self):
'''
Recursively loads role dependencies from the metadata list of
dependencies, if it exists
'''
deps = []
if self._metadata:
for role_include in self._metadata.dependencies:
r = Role.load(role_include, play=self._play, parent_role=self)
deps.append(r)
return deps
# other functions
def add_parent(self, parent_role):
''' adds a role to the list of this roles parents '''
if not isinstance(parent_role, Role):
raise AnsibleAssertionError()
if parent_role not in self._parents:
self._parents.append(parent_role)
def get_parents(self):
return self._parents
def get_default_vars(self, dep_chain=None):
dep_chain = [] if dep_chain is None else dep_chain
default_vars = dict()
for dep in self.get_all_dependencies():
default_vars = combine_vars(default_vars, dep.get_default_vars())
if dep_chain:
for parent in dep_chain:
default_vars = combine_vars(default_vars, parent._default_vars)
default_vars = combine_vars(default_vars, self._default_vars)
return default_vars
def get_inherited_vars(self, dep_chain=None):
dep_chain = [] if dep_chain is None else dep_chain
inherited_vars = dict()
if dep_chain:
for parent in dep_chain:
inherited_vars = combine_vars(inherited_vars, parent._role_vars)
return inherited_vars
def get_role_params(self, dep_chain=None):
dep_chain = [] if dep_chain is None else dep_chain
params = {}
if dep_chain:
for parent in dep_chain:
params = combine_vars(params, parent._role_params)
params = combine_vars(params, self._role_params)
return params
def get_vars(self, dep_chain=None, include_params=True):
dep_chain = [] if dep_chain is None else dep_chain
all_vars = self.get_inherited_vars(dep_chain)
for dep in self.get_all_dependencies():
all_vars = combine_vars(all_vars, dep.get_vars(include_params=include_params))
all_vars = combine_vars(all_vars, self.vars)
all_vars = combine_vars(all_vars, self._role_vars)
if include_params:
all_vars = combine_vars(all_vars, self.get_role_params(dep_chain=dep_chain))
return all_vars
def get_direct_dependencies(self):
return self._dependencies[:]
def get_all_dependencies(self):
'''
Returns a list of all deps, built recursively from all child dependencies,
in the proper order in which they should be executed or evaluated.
'''
child_deps = []
for dep in self.get_direct_dependencies():
for child_dep in dep.get_all_dependencies():
child_deps.append(child_dep)
child_deps.append(dep)
return child_deps
def get_task_blocks(self):
return self._task_blocks[:]
def get_handler_blocks(self, play, dep_chain=None):
block_list = []
# update the dependency chain here
if dep_chain is None:
dep_chain = []
new_dep_chain = dep_chain + [self]
for dep in self.get_direct_dependencies():
dep_blocks = dep.get_handler_blocks(play=play, dep_chain=new_dep_chain)
block_list.extend(dep_blocks)
for task_block in self._handler_blocks:
new_task_block = task_block.copy()
new_task_block._dep_chain = new_dep_chain
new_task_block._play = play
block_list.append(new_task_block)
return block_list
def has_run(self, host):
'''
Returns true if this role has been iterated over completely and
at least one task was run
'''
return host.name in self._completed and not self._metadata.allow_duplicates
def compile(self, play, dep_chain=None):
'''
Returns the task list for this role, which is created by first
recursively compiling the tasks for all direct dependencies, and
then adding on the tasks for this role.
The role compile() also remembers and saves the dependency chain
with each task, so tasks know by which route they were found, and
can correctly take their parent's tags/conditionals into account.
'''
block_list = []
# update the dependency chain here
if dep_chain is None:
dep_chain = []
new_dep_chain = dep_chain + [self]
deps = self.get_direct_dependencies()
for dep in deps:
dep_blocks = dep.compile(play=play, dep_chain=new_dep_chain)
block_list.extend(dep_blocks)
for idx, task_block in enumerate(self._task_blocks):
new_task_block = task_block.copy()
new_task_block._dep_chain = new_dep_chain
new_task_block._play = play
if idx == len(self._task_blocks) - 1:
new_task_block._eor = True
block_list.append(new_task_block)
return block_list
def serialize(self, include_deps=True):
res = super(Role, self).serialize()
res['_role_name'] = self._role_name
res['_role_path'] = self._role_path
res['_role_vars'] = self._role_vars
res['_role_params'] = self._role_params
res['_default_vars'] = self._default_vars
res['_had_task_run'] = self._had_task_run.copy()
res['_completed'] = self._completed.copy()
if self._metadata:
res['_metadata'] = self._metadata.serialize()
if include_deps:
deps = []
for role in self.get_direct_dependencies():
deps.append(role.serialize())
res['_dependencies'] = deps
parents = []
for parent in self._parents:
parents.append(parent.serialize(include_deps=False))
res['_parents'] = parents
return res
def deserialize(self, data, include_deps=True):
self._role_name = data.get('_role_name', '')
self._role_path = data.get('_role_path', '')
self._role_vars = data.get('_role_vars', dict())
self._role_params = data.get('_role_params', dict())
self._default_vars = data.get('_default_vars', dict())
self._had_task_run = data.get('_had_task_run', dict())
self._completed = data.get('_completed', dict())
if include_deps:
deps = []
for dep in data.get('_dependencies', []):
r = Role()
r.deserialize(dep)
deps.append(r)
setattr(self, '_dependencies', deps)
parent_data = data.get('_parents', [])
parents = []
for parent in parent_data:
r = Role()
r.deserialize(parent, include_deps=False)
parents.append(r)
setattr(self, '_parents', parents)
metadata_data = data.get('_metadata')
if metadata_data:
m = RoleMetadata()
m.deserialize(metadata_data)
self._metadata = m
super(Role, self).deserialize(data)
def set_loader(self, loader):
self._loader = loader
for parent in self._parents:
parent.set_loader(loader)
for dep in self.get_direct_dependencies():
dep.set_loader(loader)
| gpl-3.0 | -2,407,202,554,963,732,500 | 37.837782 | 156 | 0.596437 | false | 4.219991 | false | false | false |
shaih/HElib | utils/tests/diff-threshold.py | 1 | 3144 | #!/usr/bin/env python3
# Copyright (C) 2020 IBM Corp.
# This program is Licensed under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License. See accompanying LICENSE file.
import argparse
import sys
import ast
import math
def diff_float(na, nb, threshold):
for a, b in zip(na, nb):
if not math.isclose(a, b, abs_tol=threshold):
raise ValueError(f"Difference {a - b} between {a} and {b} "
f"exceeds threshold {threshold}.")
def makeSameSize(a, b, max_length):
lenA, lenB = len(a), len(b)
if lenA > max_length or lenB > max_length:
raise ValueError(f"Size of slots for {a}({lenA}) {b}({lenB}) "
f"> {max_length}.")
if lenA == lenB:
return a, b
else:
maxSz = max(lenA, lenB)
a += [0] * (maxSz - lenA)
b += [0] * (maxSz - lenB)
return (a, b)
def parseCorrectly(la, lb, decrypt):
error_msg = "Type mismatch. {0}({1}) and {2}({3}) type do not match."
if decrypt:
for a, b in zip(la, lb):
a, b = ast.literal_eval(a), ast.literal_eval(b)
if type(a) is not type(b):
raise TypeError(error_msg.format(a, type(a), b, type(b)))
yield a, b
else:
for a, b in zip(la, lb):
a = [[ float(i) for i in a.split(",") ]]
b = [[ float(i) for i in b.split(",") ]]
if type(a) is not type(b):
raise TypeError(error_msg.format(a, type(a), b, type(b)))
yield a, b
def main():
parser = argparse.ArgumentParser()
parser.add_argument("firstfile", help="first data file", type=str)
parser.add_argument("secondfile", help="second data file", type=str)
parser.add_argument("--decrypt", help="diff decrypt format (instead of decode)",
action='store_true')
parser.add_argument("--threshold", help="error threshold [default=0.001]",
type=float, default=0.001)
args = parser.parse_args()
with open(args.firstfile, 'r') as f1, open(args.secondfile, 'r') as f2:
l1, l2 = list(f1), list(f2)
if len(l1) != len(l2):
sys.exit(f"Different number of lines. "
f"First contains {len(l1)} second contains {len(l2)}.")
if l1[0] != l2[0]:
sys.exit(f"File headers differ. {l1[0]} {l2[0]}.")
try:
for a, b in parseCorrectly(l1[1:], l2[1:], args.decrypt):
for sa, sb in zip(a, b):
sa, sb = makeSameSize(sa, sb, 2)
diff_float(sa, sb, args.threshold)
except (TypeError, ValueError) as e:
sys.exit(str(e))
if __name__ == "__main__":
main()
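# Example invocations (illustrative; file names and threshold are placeholders):
#   ./diff-threshold.py expected.txt actual.txt --threshold 0.0001
#   ./diff-threshold.py expected.txt actual.txt --decrypt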
| apache-2.0 | -4,331,664,892,367,646,000 | 36.428571 | 84 | 0.57729 | false | 3.35539 | false | false | false |
Videonauth/passgen | tool/keyboard_list_generator.py | 1 | 2612 | #!/usr/bin/env python3
# -*- coding: UTF-8 -*-
############################################################################
#
# keyboard_list_generator.py
#
############################################################################
#
# Author: Videonauth <videonauth@googlemail.com>
# Date: 09.07.2016
# Purpose:
# Generate a word-list for the keyboard sequence check.
# Written for: Python 3.5.1
#
############################################################################
de_lowercase = "qwertzuiopü+asdfghjkllöä#yxcvbnm,.-"
de_uppercase = "°!§$%&/()=?WERTZUIOPÜ*ASDFGHJKLÖÄ'YXCVBNM;:_"
en_lowercase = "-=qwertyuiop[]asdfghjkl;'zxcvbnm,./"
en_uppercase = "~!@#$%^&*()_QWERTYUIOP{}ASDFGHJKL:ZXCVBNM<>?"
# next line might error out if destination file does not exist
with open('../lists/keyboard.wl', 'r+') as file:
for a in range(3, len(de_lowercase) + 1):
for b in range(len(de_lowercase)):
if len(de_lowercase[b: b + a]) == a:
file.write(de_lowercase[b: b + a] + '\n')
for a in range(3, len(de_uppercase) + 1):
for b in range(len(de_uppercase)):
if len(de_uppercase[b: b + a]) == a:
file.write(de_uppercase[b: b + a] + '\n')
for a in range(3, len(en_lowercase) + 1):
for b in range(len(en_lowercase)):
if len(en_lowercase[b: b + a]) == a:
file.write(en_lowercase[b: b + a] + '\n')
for a in range(3, len(en_uppercase) + 1):
for b in range(len(en_uppercase)):
if len(en_uppercase[b: b + a]) == a:
file.write(en_uppercase[b: b + a] + '\n')
de_lowercasere = de_lowercase[:: -1]
de_uppercasere = de_uppercase[:: -1]
en_lowercasere = en_lowercase[:: -1]
en_uppercasere = en_uppercase[:: -1]
for a in range(3, len(de_lowercasere) + 1):
for b in range(len(de_lowercasere)):
if len(de_lowercasere[b: b + a]) == a:
file.write(de_lowercasere[b: b + a] + '\n')
for a in range(3, len(de_uppercasere) + 1):
for b in range(len(de_uppercasere)):
if len(de_uppercasere[b: b + a]) == a:
file.write(de_uppercasere[b: b + a] + '\n')
for a in range(3, len(en_lowercasere) + 1):
for b in range(len(en_lowercasere)):
if len(en_lowercasere[b: b + a]) == a:
file.write(en_lowercasere[b: b + a] + '\n')
for a in range(3, len(en_uppercasere) + 1):
for b in range(len(en_uppercasere)):
if len(en_uppercasere[b: b + a]) == a:
file.write(en_uppercasere[b: b + a] + '\n')
file.close()
| mit | 7,557,594,228,450,199,000 | 41.688525 | 76 | 0.504224 | false | 3.059929 | false | false | false |
CNR-ISMAR/rectifiedgrid | rectifiedgrid/hillshade.py | 1 | 1108 | from matplotlib.colors import LightSource
# Adapted from https://github.com/jobar8/graphics
def alpha_blend(rgb, intensity, alpha=0.7):
return alpha * rgb + (1 - alpha) * intensity
def get_hs(data,
cmap,
norm=None,
zf=10,
azdeg=315,
altdeg=45,
dx=1,
dy=1,
fraction=1.5,
blend_mode='alpha',
alpha=0.7,
**kwargs_norm):
ls = LightSource(azdeg, altdeg)
if blend_mode == 'alpha':
# transparency blending
rgb = ls.shade(data, cmap=cmap,
norm=norm,
blend_mode=alpha_blend, vert_exag=zf, dx=dx, dy=dy,
fraction=fraction, alpha=alpha, **kwargs_norm)
else:
rgb = ls.shade(data,
cmap=cmap,
norm=norm,
blend_mode=blend_mode,
vert_exag=zf,
dx=dx,
dy=dy,
fraction=fraction,
**kwargs_norm)
return rgb
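# Illustrative usage (assumption, not part of the original module): shade a small
# elevation grid with a matplotlib colormap and display the blended RGB array.
#   import numpy as np
#   import matplotlib.pyplot as plt
#   from matplotlib import cm
#   dem = np.random.rand(50, 50) * 100
#   rgb = get_hs(dem, cm.terrain, zf=5)
#   plt.imshow(rgb)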
| gpl-3.0 | 3,936,537,537,660,941,000 | 28.157895 | 74 | 0.447653 | false | 3.971326 | false | false | false |
HyechurnJang/pygics | sample/simple_database.py | 1 | 1764 | # -*- coding: utf-8 -*-
'''
____ ___ ____________ ___ ___ ____ _________________
/ __ \/ _ | / __/ _/ __/ / _ \/ _ \/ __ \__ / / __/ ___/_ __/
/ /_/ / __ |_\ \_/ /_\ \ / ___/ , _/ /_/ / // / _// /__ / /
\____/_/ |_/___/___/___/ /_/ /_/|_|\____/\___/___/\___/ /_/
Operational Aid Source for Infra-Structure
Created on 2020. 3. 18..
@author: Hye-Churn Jang, CMBU Specialist in Korea, VMware [jangh@vmware.com]
'''
#===============================================================================
# Prepare PostgreSQL Server
#===============================================================================
# docker run --name postgres -p 5432:5432 -e POSTGRES_PASSWORD=password -e POSTGRES_USER=pygics -e POSTGRES_DB=pygicsdb -d postgres
from pygics import load, logInfo
load('modules.postgres')
# Login Database
SDK.PygicsDB.system('localhost:5432', 'pygics', 'password')
# "User" Table at "PygicsDB" Database
User = SDK.PygicsDB.User
logInfo('Create Users')
with SDK.PygicsDB: # Open Transaction for Create Records
User('Tony', 'Tony Stark', 'IronMan')
User('Peter', 'Peter Parker', 'SpiderMan')
User('Peter', 'Peter Pan', 'Elf')
logInfo('Get All Users\n{}'.format(User.list()))
# query form based SQLAlchemy
logInfo('Find All Peters\n{}'.format(User.list(User.name == 'Peter', order='id')))
with SDK.PygicsDB: # Open Transaction
tony = User.list(User.name == 'Tony')[0]
tony.nickname = 'Avengers Leader' # Update Data
tony.update()
logInfo('Check Tony Changed\n{}'.format(User.list(User.name == 'Tony')))
logInfo('Delete All Users')
with SDK.PygicsDB: # Open Transaction for Delete
for user in User.list():
user.delete()
logInfo('Check Users Empty\n{}'.format(User.list()))
| apache-2.0 | -6,248,814,847,715,503,000 | 34.28 | 131 | 0.521542 | false | 3.005111 | false | false | false |
markvdw/GParML | scg_adapted_local_MapReduce.py | 2 | 8715 | '''
A bunch of support functions used for SCG optimisation. They depend on the
parallel implementation framework, but may change for other optimisers.
'''
import glob
import time
import numpy
from os.path import splitext
from local_MapReduce import load, save
time_acc = {
'embeddings_set_grads' : [],
'embeddings_get_grads_mu' : [],
'embeddings_get_grads_kappa' : [],
'embeddings_get_grads_theta' : [],
'embeddings_get_grads_current_grad' : [],
'embeddings_get_grads_gamma' : [],
'embeddings_get_grads_max_d' : [],
'embeddings_set_grads_reset_d' : [],
'embeddings_set_grads_update_d' : [],
'embeddings_set_grads_update_X' : [],
'embeddings_set_grads_update_grad_old' : [],
'embeddings_set_grads_update_grad_new' : [],
}
'''
Initialisation for local statistics
'''
def embeddings_set_grads(folder):
'''
Sets the grads and other local statistics often needed for optimisation locally for
    each node. This is currently only implemented locally, but could easily be adapted
    to the MapReduce framework to be done on remote nodes in parallel. There's no real
    need to do this in parallel though, as the computations taking place are not that
time consuming.
'''
global time_acc
start = time.time()
input_files = sorted(glob.glob(folder + '/*.grad_latest.npy'))
for file_name in input_files:
grads = load(file_name)
#print 'grads'
#print grads
# Save grad new as the latest grad evaluated
new_file = splitext(splitext(file_name)[0])[0] + '.grad_new.npy'
save(new_file, grads)
# Init the old grad to be grad new
new_file = splitext(splitext(file_name)[0])[0] + '.grad_old.npy'
save(new_file, grads)
# Save the direction as the negative grad
new_file = splitext(splitext(file_name)[0])[0] + '.grad_d.npy'
save(new_file, -1 * grads)
end = time.time()
time_acc['embeddings_set_grads'] += [end - start]
'''
Getters for local statistics
'''
def embeddings_get_grads_mu(folder):
'''
Get the sum over the inputs of the inner product of the direction and grad_new
'''
global time_acc
start = time.time()
mu = 0
grad_new_files = sorted(glob.glob(folder + '/*.grad_new.npy'))
grad_d_files = sorted(glob.glob(folder + '/*.grad_d.npy'))
for grad_new_file, grad_d_file in zip(grad_new_files, grad_d_files):
grad_new = load(grad_new_file)
grad_d = load(grad_d_file)
mu += (grad_new * grad_d).sum()
end = time.time()
time_acc['embeddings_get_grads_mu'] += [end - start]
return mu
def embeddings_get_grads_kappa(folder):
'''
Get the sum over the inputs of the inner product of the direction with itself
'''
global time_acc
start = time.time()
kappa = 0
grad_d_files = sorted(glob.glob(folder + '/*.grad_d.npy'))
for grad_d_file in grad_d_files:
grad_d = load(grad_d_file)
kappa += (grad_d * grad_d).sum()
end = time.time()
time_acc['embeddings_get_grads_kappa'] += [end - start]
return kappa
def embeddings_get_grads_theta(folder):
'''
Get the sum over the inputs of the inner product of the direction and grad_latest
'''
global time_acc
start = time.time()
theta = 0
grad_new_files = sorted(glob.glob(folder + '/*.grad_new.npy'))
grad_latest_files = sorted(glob.glob(folder + '/*.grad_latest.npy'))
grad_d_files = sorted(glob.glob(folder + '/*.grad_d.npy'))
for grad_latest_file, grad_d_file, grad_new_file in zip(grad_latest_files, grad_d_files, grad_new_files):
grad_latest = load(grad_latest_file)
grad_new = load(grad_new_file)
grad_d = load(grad_d_file)
theta += (grad_d * (grad_latest - grad_new)).sum()
end = time.time()
time_acc['embeddings_get_grads_theta'] += [end - start]
return theta
def embeddings_get_grads_current_grad(folder):
'''
Get the sum over the inputs of the inner product of grad_new with itself
'''
global time_acc
start = time.time()
current_grad = 0
grad_new_files = sorted(glob.glob(folder + '/*.grad_new.npy'))
for grad_new_file in grad_new_files:
grad_new = load(grad_new_file)
current_grad += (grad_new * grad_new).sum()
end = time.time()
time_acc['embeddings_get_grads_current_grad'] += [end - start]
return current_grad
def embeddings_get_grads_gamma(folder):
'''
Get the sum over the inputs of the inner product of grad_old and grad_new
'''
global time_acc
start = time.time()
gamma = 0
grad_new_files = sorted(glob.glob(folder + '/*.grad_new.npy'))
grad_old_files = sorted(glob.glob(folder + '/*.grad_old.npy'))
for grad_new_file, grad_old_file in zip(grad_new_files, grad_old_files):
grad_new = load(grad_new_file)
grad_old = load(grad_old_file)
gamma += (grad_new * grad_old).sum()
end = time.time()
time_acc['embeddings_get_grads_gamma'] += [end - start]
return gamma
def embeddings_get_grads_max_d(folder, alpha):
'''
Get the max abs element of the direction over all input files
'''
global time_acc
start = time.time()
max_d = 0
grad_d_files = sorted(glob.glob(folder + '/*.grad_d.npy'))
for grad_d_file in grad_d_files:
grad_d = load(grad_d_file)
max_d = max(max_d, numpy.max(numpy.abs(alpha * grad_d)))
end = time.time()
time_acc['embeddings_get_grads_max_d'] += [end - start]
return max_d
'''
Setters for local statistics
'''
def embeddings_set_grads_reset_d(folder):
'''
Reset the direction to be the negative of grad_new
'''
global time_acc
start = time.time()
input_files = sorted(glob.glob(folder + '/*.grad_new.npy'))
for file_name in input_files:
grads = load(file_name)
# Save the direction as the negative grad
new_file = splitext(splitext(file_name)[0])[0] + '.grad_d.npy'
save(new_file, -1 * grads)
end = time.time()
time_acc['embeddings_set_grads_reset_d'] += [end - start]
def embeddings_set_grads_update_d(folder, gamma):
'''
Update the value of the direction for each input to be gamma (given) times the old direction
minus grad_new
'''
global time_acc
start = time.time()
grad_new_files = sorted(glob.glob(folder + '/*.grad_new.npy'))
grad_d_files = sorted(glob.glob(folder + '/*.grad_d.npy'))
for grad_new_file, grad_d_file in zip(grad_new_files, grad_d_files):
grad_new = load(grad_new_file)
grad_d = load(grad_d_file)
save(grad_d_file, gamma * grad_d - grad_new)
end = time.time()
time_acc['embeddings_set_grads_update_d'] += [end - start]
def embeddings_set_grads_update_X(folder, alpha):
'''
Update the value of the local embeddings and variances themselves to be X + alpha * direction
'''
global time_acc
start = time.time()
grad_d_files = sorted(glob.glob(folder + '/*.grad_d.npy'))
X_mu_files = sorted(glob.glob(folder + '/*.embedding.npy'))
X_S_files = sorted(glob.glob(folder + '/*.variance.npy'))
for grad_d_file, X_mu_file, X_S_file in zip(grad_d_files, X_mu_files, X_S_files):
grad_d = load(grad_d_file)
grad_d_X_mu = grad_d[0]
grad_d_X_S = grad_d[1]
X_mu = load(X_mu_file)
X_S = load(X_S_file)
#print 'X_mu'
#print X_mu
#print 'X_S'
#print X_S
save(X_mu_file, X_mu + alpha * grad_d_X_mu)
save(X_S_file, X_S + alpha * grad_d_X_S)
end = time.time()
time_acc['embeddings_set_grads_update_X'] += [end - start]
def embeddings_set_grads_update_grad_old(folder):
'''
Set grad_old to be grad_new
'''
global time_acc
start = time.time()
input_files = sorted(glob.glob(folder + '/*.grad_new.npy'))
for file_name in input_files:
grads = load(file_name)
# Save grad old as latest grad new
new_file = splitext(splitext(file_name)[0])[0] + '.grad_old.npy'
save(new_file, grads)
end = time.time()
time_acc['embeddings_set_grads_update_grad_old'] += [end - start]
def embeddings_set_grads_update_grad_new(folder):
'''
Set grad_new to be grad_latest (a temp grad that keeps changing every evaluation)
'''
global time_acc
start = time.time()
input_files = sorted(glob.glob(folder + '/*.grad_latest.npy'))
for file_name in input_files:
grads = load(file_name)
# Save grad old as latest grad new
new_file = splitext(splitext(file_name)[0])[0] + '.grad_new.npy'
save(new_file, grads)
end = time.time()
time_acc['embeddings_set_grads_update_grad_new'] += [end - start]
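# Illustrative call order (assumption, sketching one optimiser step over a folder
# of per-node statistics files; not part of the original module):
#   folder = '/tmp/statistics'
#   embeddings_set_grads(folder)               # initialise d, grad_old, grad_new
#   mu = embeddings_get_grads_mu(folder)
#   kappa = embeddings_get_grads_kappa(folder)
#   embeddings_set_grads_update_X(folder, 0.1) # step along the direction d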
| bsd-3-clause | -4,492,889,811,067,905,000 | 34.864198 | 109 | 0.618589 | false | 3.226583 | true | false | false |
KarolBedkowski/photomagic | photomagick/filters/bw.py | 1 | 1431 | #!usr/bin/python
# -*- coding: utf-8 -*-
__plugins__ = ('BwLuminosity', 'BwGreen', 'BwOrange', 'BwRed', 'BwYellow',
'BwInfrared')
__version__ = '2011-03-20'
__author__ = 'Karol Będkowski'
__copyright__ = "Copyright (c) Karol Będkowski, 2011"
import ImageOps
from photomagick.common import colors
from photomagick.common.base_filter import BaseFilter
from photomagick.common.const import CATEGORY_BASE
class BwLuminosity(BaseFilter):
STEPS = 3
NAME = _("BW Luminosity")
CATEGORY = CATEGORY_BASE
def process(self, image):
yield 'Start...', image
image = colors.convert_to_luminosity(image)
yield 'Contrast...', image
image = ImageOps.autocontrast(image)
yield 'Done', image
class _BwFilter(BaseFilter):
STEPS = 3
NAME = 'BW Filter'
CATEGORY = CATEGORY_BASE
_COLOR = (1, 1, 1)
def process(self, image):
yield 'Start...', image
image = colors.color_mixer_monochrome(image, *self._COLOR)
yield 'Contrast...', image
image = ImageOps.autocontrast(image)
yield 'Done', image
class BwGreen(_BwFilter):
NAME = _('BW Green Filter')
_COLOR = 0.04, 0.27, 0.08
class BwOrange(_BwFilter):
NAME = _('BW Orange Filter')
_COLOR = (0.31, 0.09, 0)
class BwRed(_BwFilter):
NAME = _('BW Red Filter')
_COLOR = (0.35, 0.04, 0)
class BwYellow(_BwFilter):
NAME = _('BW Yellow Filter')
_COLOR = (0.24, 0.11, 0.05)
class BwInfrared(_BwFilter):
NAME = _('BW Infrared')
_COLOR = (0.15, 1.15, -0.30)
| gpl-2.0 | -3,300,862,538,246,330,000 | 20.651515 | 74 | 0.664801 | false | 2.593466 | false | false | false |
ReedAnders/deepmap | deepmap/nn.py | 1 | 5374 | # Copyright (C) 2016 Reed Anderson.
# From: https://github.com/ReedAnders/deepmap
# License: MIT BY https://opensource.org/licenses/MIT
import pickle, os, binascii
from collections import deque
import numpy as np
from math import exp
from random import random
class NodeMap:
def __init__(self, input_node_population=12, output_node_population=1, latent_node_population=400):
self.coordinate_map = []
self.input_nodes = [InputNode() for node in range(input_node_population)]
self.output_nodes = [OutputNode() for node in range(output_node_population)]
self.latent_nodes = [LatentNode() for node in range(latent_node_population)]
self.all_nodes = self.input_nodes + self.output_nodes + self.latent_nodes
def construct_map(self):
for node in self.all_nodes:
self.coordinate_map.append((node.name, node.coordinates))
for node in self.all_nodes:
node.find_neighbors(self.coordinate_map)
self.update_input_values()
# pickle.dump( self.coordinate_map, open( "pickles/coordinate_map.p", "wb" ) )
# pickle.dump( self.input_nodes, open( "pickles/input_nodes.p", "wb" ) )
# pickle.dump( self.output_nodes, open( "pickles/output_nodes.p", "wb" ) )
# pickle.dump( self.latent_nodes, open( "pickles/latent_nodes.p", "wb" ) )
def calculate_dimensions(self):
n_params = 0
for node in self.all_nodes:
n_params += 2
n_params += len(node.true_neighbor_index)
return n_params
def error(self, correct_labels, predicted_labels):
error = None
pattern_error = []
n_training_patterns = len(correct_labels)
for i in range(n_training_patterns):
_sum = sum([(y-o)**2 for y,o in zip(correct_labels, predicted_labels)])
pattern_error.append(_sum)
error = 1.0/n_training_patterns * sum(pattern_error)
return error
def train(self, training_patterns, param):
n_training_patterns = len(training_patterns)
for i in training_patterns:
n_labels = len(self.output_nodes)
inputs = i[:-n_labels]
c_labels = i[-n_labels:]
p_labels = self.evaluate_topology(inputs, param)
error = self.error(c_labels, p_labels)
fitness = 1 - error
print 'ERROR: %r' % (error)
return error, fitness
def evaluate_topology(self, data, param):
p_labels = []
for index, node in enumerate(self.input_nodes):
node.value = float(data[index])
# Trim parameters
p_len = len(param)
t_len = len(self.latent_nodes + self.output_nodes) * 2
w_len = p_len - t_len
w_para = param[:w_len]
# t_para = deque(param[w_len-2:])
# Evaluate function
for node in self.latent_nodes + self.output_nodes:
self.evaluate_weights(w_para)
t_para = deque(param[w_len-2:])
# for node in self.latent_nodes + self.output_nodes:
# node_topo_params = [t_para.popleft() for _i in range(2)]
# node.eval_neighbors(node_topo_params[0],node_topo_params[1])
# Return predicted labels
p_labels = [node.value for node in self.output_nodes]
return p_labels
def evaluate_weights(self, param):
w_para = deque(param)
for node in self.latent_nodes + self.output_nodes:
neighbors = len(node.true_neighbor_index)
node_weight_params = [w_para.popleft() for _i in range(neighbors)]
node.eval_sigmoid(node_weight_params)
self.update_input_values()
def update_input_values(self):
for node in self.output_nodes + self.latent_nodes:
for index in node.true_neighbor_index:
node.input_values.append(self.all_nodes[index].value)
class Node:
def __init__(self, dimensions=3):
self.name = binascii.b2a_hex(os.urandom(8))
self.coordinates = np.array([random() for i in range(dimensions)])
self.neighbors = []
self.true_neighbor_index = []
self.optimal_neighbor_set = set()
self.value = 0.0
def find_neighbors(self, coordinate_map):
for index, node in enumerate(coordinate_map):
if np.linalg.norm(self.coordinates-node[1]) < 0.3:
self.true_neighbor_index.append(index)
self.neighbors.append((node,True))
else:
self.neighbors.append((node,False))
# Two parameters between -1, 1
def eval_neighbors(self, lower_bound, upper_bound):
for index in self.true_neighbor_index:
dist = np.linalg.norm(self.coordinates-self.neighbors[index][0][1])
if dist > lower_bound and dist < upper_bound:
self.optimal_neighbor_set.add(index)
class InputNode(Node):
def __init__(self):
Node.__init__(self)
class LatentNode(Node):
def __init__(self):
Node.__init__(self)
self.value = random()
self.input_values = []
# Multiple parameters for n weights -1, 1
def eval_sigmoid(self, weights):
x = sum([w*v for w,v in zip(weights, self.input_values)])
self.value = 1 / (1 + exp(-x))
class OutputNode(LatentNode):
def __init__(self):
LatentNode.__init__(self)
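# Illustrative usage (assumption, not part of the original module): build a map,
# wire up neighbours and query the number of optimisable parameters.
#   nodemap = NodeMap(input_node_population=4, output_node_population=1,
#                     latent_node_population=20)
#   nodemap.construct_map()
#   n_params = nodemap.calculate_dimensions()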
| mit | -437,573,294,550,352,260 | 32.798742 | 103 | 0.600298 | false | 3.601877 | false | false | false |
aaronst/macholibre | macholibre/dictionary.py | 1 | 12913 | #!/usr/bin/env python
"""
Copyright 2016 Aaron Stephens <aaronjst93@gmail.com>
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
# CPU Types, CPU Subtypes, Filetypes, Load Commands, Flags as defined in the
# following official Apple, inc. header files:
# /usr/include/mach/machine.h
# /usr/include/mach-o/loader.h
cert_slots = {
-1: 'root',
0: 'leaf'
}
hashes = {
0: 'No Hash',
1: 'SHA-1',
2: 'SHA-256'
}
segment_flags = {
1: 'HIGHVM',
2: 'FVMLIB',
4: 'NORELOC',
8: 'PROTECTED_VERSION_1'
}
n_types = {
0x0: 'UNDF',
0x2: 'ABS',
0xe: 'SECT',
0xc: 'PBUD',
0xa: 'INDR'
}
machos = {
4277009102: (False, False), # 32 bit, big endian
4277009103: (True, False), # 64 bit, big endian
3472551422: (False, True), # 32 bit, little endian
3489328638: (True, True) # 64 bit, little endian
}
requirements = {
1: 'HostRequirementType',
2: 'GuestRequirementType',
3: 'DesignatedRequirementType',
4: 'LibraryRequirementType',
5: 'PluginRequirementType',
}
indeces = {
0: 'CodeDirectorySlot',
1: 'InfoSlot',
2: 'RequirementsSlot',
3: 'ResourceDirSlot',
4: 'ApplicationSlot',
5: 'EntitlementSlot',
0x10000: 'SignatureSlot'
}
matches = {
0: 'matchExists',
1: 'matchEqual',
2: 'matchContains',
3: 'matchBeginsWith',
4: 'matchEndsWith',
5: 'matchLessThan',
6: 'matchGreaterThan',
7: 'matchLessEqual',
8: 'matchGreaterEqual'
}
protections = {
0b000: '---',
0b001: 'r--',
0b010: '-w-',
0b011: 'rw-',
0b100: '--x',
0b101: 'r-x',
0b110: '-wx',
0b111: 'rwx'
}
signatures = {
'REQUIREMENT': 0xfade0c00,
'REQUIREMENTS': 0xfade0c01,
'CODEDIRECTORY': 0xfade0c02,
'ENTITLEMENT': 0xfade7171,
'BLOBWRAPPER': 0xfade0b01,
'EMBEDDED_SIGNATURE': 0xfade0cc0,
'DETACHED_SIGNATURE': 0xfade0cc1,
'CODE_SIGN_DRS': 0xfade0c05
}
section_attrs = {
0x80000000: 'PURE_INSTRUCTIONS',
0x40000000: 'NO_TOC',
0x20000000: 'STRIP_STATIC_SYMS',
0x10000000: 'NO_DEAD_STRIP',
0x08000000: 'LIVE_SUPPORT',
0x04000000: 'SELF_MODIFYING_CODE',
0x02000000: 'DEBUG',
0x00000400: 'SOME_INSTRUCTIONS',
0x00000200: 'EXT_RELOC',
0x00000100: 'LOC_RELOC'
}
filetypes = {
1: 'OBJECT',
2: 'EXECUTE',
3: 'FVMLIB',
4: 'CORE',
5: 'PRELOAD',
6: 'DYLIB',
7: 'DYLINKER',
8: 'BUNDLE',
9: 'DYLIB_STUB',
10: 'DSYM',
11: 'KEXT_BUNDLE'
}
section_types = {
0x0: 'REGULAR',
0x1: 'ZEROFILL',
0x2: 'CSTRING_LITERALS',
0x3: '4BYTE_LITERALS',
0x4: '8BYTE_LITERALS',
0x5: 'LITERAL_POINTERS',
0x6: 'NON_LAZY_SYMBOL_POINTERS',
0x7: 'LAZY_SYMBOL_POINTERS',
0x8: 'SYMBOL_STUBS',
0x9: 'MOD_INIT_FUNC_POINTERS',
0xa: 'MOD_TERM_FUNC_POINTERS',
0xb: 'COALESCED',
0xc: 'GB_ZEROFILL',
0xd: 'INTERPOSING',
0xe: '16BYTE_LITERALS',
0xf: 'DTRACE_DOF',
0x10: 'LAZY_DYLIB_SYMBOL_POINTERS',
0x11: 'THREAD_LOCAL_REGULAR',
0x12: 'THREAD_LOCAL_ZEROFILL',
0x13: 'THREAD_LOCAL_VARIABLES',
0x14: 'THREAD_LOCAL_VARIABLE_POINTERS',
0x15: 'THREAD_LOCAL_INIT_FUNCTION_POINTERS'
}
operators = {
0: 'False',
1: 'True',
2: 'Ident',
3: 'AppleAnchor',
4: 'AnchorHash',
5: 'InfoKeyValue',
6: 'And',
7: 'Or',
8: 'CDHash',
9: 'Not',
10: 'InfoKeyField',
11: 'CertField',
12: 'TrustedCert',
13: 'TrustedCerts',
14: 'CertGeneric',
15: 'AppleGenericAnchor',
16: 'EntitlementField',
17: 'CertPolicy',
18: 'NamedAnchor',
19: 'NamedCode',
20: 'Platform'
}
thread_states = {
1: 'x86_THREAD_STATE32',
2: 'x86_FLOAT_STATE32',
3: 'x86_EXCEPTION_STATE32',
4: 'x86_THREAD_STATE64',
5: 'x86_FLOAT_STATE64',
6: 'x86_EXCEPTION_STATE64',
7: 'x86_THREAD_STATE',
8: 'x86_FLOAT_STATE',
9: 'x86_EXCEPTION_STATE',
10: 'x86_DEBUG_STATE32',
11: 'x86_DEBUG_STATE64',
12: 'x86_DEBUG_STATE',
13: 'THREAD_STATE_NONE',
14: 'x86_SAVED_STATE_1 (INTERNAL ONLY)',
15: 'x86_SAVED_STATE_2 (INTERNAL ONLY)',
16: 'x86_AVX_STATE32',
17: 'x86_AVX_STATE64',
18: 'x86_AVX_STATE'
}
flags = {
1: 'NOUNDEFS',
2: 'INCRLINK',
4: 'DYLDLINK',
8: 'BINDATLOAD',
16: 'PREBOUND',
32: 'SPLIT_SEGS',
64: 'LAZY_INIT',
128: 'TWOLEVEL',
256: 'FORCE_FLAT',
512: 'NOMULTIDEFS',
1024: 'NOFIXPREBINDING',
2048: 'PREBINDABLE',
4096: 'ALLMODSBOUND',
8192: 'SUBSECTIONS_VIA_SYMBOLS',
16384: 'CANONICAL',
32768: 'WEAK_DEFINES',
65536: 'BINDS_TO_WEAK',
131072: 'ALLOW_STACK_EXECUTION',
262144: 'ROOT_SAFE',
524288: 'SETUID_SAFE',
1048576: 'NOREEXPORTED_DYLIBS',
2097152: 'PIE',
4194304: 'DEAD_STRIPPABLE_DYLIB',
8388608: 'HAS_TLV_DESCRIPTORS',
16777216: 'NO_HEAP_EXECUTION',
33554432: 'APP_EXTENSION_SAFE'
}
stabs = {
0x20: 'GSYM',
0x22: 'FNAME',
0x24: 'FUN',
0x26: 'STSYM',
0x28: 'LCSYM',
0x2a: 'MAIN',
0x2e: 'BNSYM',
0x30: 'PC',
0x32: 'AST',
0x3a: 'MAC_UNDEF',
0x3c: 'OPT',
0x40: 'RSYM',
0x44: 'SLINE',
0x46: 'DSLINE',
0x48: 'BSLINE',
0x4e: 'ENSYM',
0x60: 'SSYM',
0x64: 'SO',
0x66: 'OSO',
0x80: 'LSYM',
0x82: 'BINCL',
0x84: 'SOL',
0x86: 'PARAMS',
0x88: 'VERSION',
0x8a: 'OLEVEL',
0xa0: 'PSYM',
0xa2: 'EINCL',
0xa4: 'ENTRY',
0xc0: 'LBRAC',
0xc2: 'EXCL',
0xe0: 'RBRAC',
0xe2: 'BCOMM',
0xe4: 'ECOMM',
0xe8: 'ECOML',
0xfe: 'LENG'
}
loadcommands = {
1: 'SEGMENT',
2: 'SYMTAB',
3: 'SYMSEG',
4: 'THREAD',
5: 'UNIXTHREAD',
6: 'LOADFVMLIB',
7: 'IDFVMLIB',
8: 'IDENT',
9: 'FVMFILE',
10: 'PREPAGE',
11: 'DYSYMTAB',
12: 'LOAD_DYLIB',
13: 'ID_DYLIB',
14: 'LOAD_DYLINKER',
15: 'ID_DYLINKER',
16: 'PREBOUND_DYLIB',
17: 'ROUTINES',
18: 'SUB_FRAMEWORK',
19: 'SUB_UMBRELLA',
20: 'SUB_CLIENT',
21: 'SUB_LIBRARY',
22: 'TWOLEVEL_HINTS',
23: 'PREBIND_CKSUM',
25: 'SEGMENT_64',
26: 'ROUTINES_64',
27: 'UUID',
29: 'CODE_SIGNATURE',
30: 'SEGMENT_SPLIT_INFO',
32: 'LAZY_LOAD_DYLIB',
33: 'ENCRYPTION_INFO',
34: 'DYLD_INFO',
36: 'VERSION_MIN_MACOSX',
37: 'VERSION_MIN_IPHONEOS',
38: 'FUNCTION_STARTS',
39: 'DYLD_ENVIRONMENT',
41: 'DATA_IN_CODE',
42: 'SOURCE_VERSION',
43: 'DYLIB_CODE_SIGN_DRS',
44: 'ENCRYPTION_INFO_64',
45: 'LINKER_OPTION',
46: 'LINKER_OPTIMIZATION_HINT',
47: 'VERSION_MIN_TVOS',
48: 'VERSION_MIN_WATCHOS',
49: 'NOTE',
50: 'BUILD_VERSION',
2147483672: 'LOAD_WEAK_DYLIB',
2147483676: 'RPATH',
2147483679: 'REEXPORT_DYLIB',
2147483682: 'DYLD_INFO_ONLY',
2147483683: 'LOAD_UPWARD_DYLIB',
2147483688: 'MAIN',
}
# CPU Types & Subtypes as defined in
# http://opensource.apple.com/source/cctools/cctools-822/include/mach/machine.h
cputypes = {
-1: {
-2: 'ANY',
-1: 'MULTIPLE',
0: 'LITTLE_ENDIAN',
1: 'BIG_ENDIAN'
},
1: {
-2: 'VAX',
-1: 'MULTIPLE',
0: 'VAX_ALL',
1: 'VAX780',
2: 'VAX785',
3: 'VAX750',
4: 'VAX730',
5: 'UVAXI',
6: 'UVAXII',
7: 'VAX8200',
8: 'VAX8500',
9: 'VAX8600',
10: 'VAX8650',
11: 'VAX8800',
12: 'UVAXIII'
},
6: {
-2: 'MC680x0',
-1: 'MULTIPLE',
1: 'MC680x0_ALL or MC68030',
2: 'MC68040',
3: 'MC68030_ONLY'
},
7: {-2: 'X86 (I386)',
-1: 'MULITPLE',
0: 'INTEL_MODEL_ALL',
3: 'X86_ALL, X86_64_ALL, I386_ALL, or 386',
4: 'X86_ARCH1 or 486',
5: '586 or PENT',
8: 'X86_64_H or PENTIUM_3',
9: 'PENTIUM_M',
10: 'PENTIUM_4',
11: 'ITANIUM',
12: 'XEON',
15: 'INTEL_FAMILY_MAX',
22: 'PENTPRO',
24: 'PENTIUM_3_M',
26: 'PENTIUM_4_M',
27: 'ITANIUM_2',
28: 'XEON_MP',
40: 'PENTIUM_3_XEON',
54: 'PENTII_M3',
86: 'PENTII_M5',
103: 'CELERON',
119: 'CELERON_MOBILE',
132: '486SX'
},
10: {
-2: 'MC98000',
-1: 'MULTIPLE',
0: 'MC98000_ALL',
1: 'MC98601'
},
11: {
-2: 'HPPA',
-1: 'MULITPLE',
0: 'HPPA_ALL or HPPA_7100',
1: 'HPPA_7100LC'
},
12: {
-2: 'ARM',
-1: 'MULTIPLE',
0: 'ARM_ALL',
1: 'ARM_A500_ARCH',
2: 'ARM_A500',
3: 'ARM_A440',
4: 'ARM_M4',
5: 'ARM_V4T',
6: 'ARM_V6',
7: 'ARM_V5TEJ',
8: 'ARM_XSCALE',
9: 'ARM_V7',
10: 'ARM_V7F',
11: 'ARM_V7S',
12: 'ARM_V7K',
13: 'ARM_V8',
14: 'ARM_V6M',
15: 'ARM_V7M',
16: 'ARM_V7EM'
},
13: {
-2: 'MC88000',
-1: 'MULTIPLE',
0: 'MC88000_ALL',
1: 'MMAX_JPC or MC88100',
2: 'MC88110'
},
14: {
-2: 'SPARC',
-1: 'MULTIPLE',
0: 'SPARC_ALL or SUN4_ALL',
1: 'SUN4_260',
2: 'SUN4_110'
},
15: {
-2: 'I860 (big-endian)',
-1: 'MULTIPLE',
0: 'I860_ALL',
1: 'I860_860'
},
18: {
-2: 'POWERPC',
-1: 'MULTIPLE',
0: 'POWERPC_ALL',
1: 'POWERPC_601',
2: 'POWERPC_602',
3: 'POWERPC_603',
4: 'POWERPC_603e',
5: 'POWERPC_603ev',
6: 'POWERPC_604',
7: 'POWERPC_604e',
8: 'POWERPC_620',
9: 'POWERPC_750',
10: 'POWERPC_7400',
11: 'POWERPC_7450',
100: 'POWERPC_970'
},
16777223: {
-2: 'X86_64',
-1: 'MULTIPLE',
0: 'INTEL_MODEL_ALL',
3: 'X86_ALL, X86_64_ALL, I386_ALL, or 386',
4: 'X86_ARCH1 or 486',
5: '586 or PENT',
8: 'X86_64_H or PENTIUM_3',
9: 'PENTIUM_M',
10: 'PENTIUM_4',
11: 'ITANIUM',
12: 'XEON',
15: 'INTEL_FAMILY_MAX',
22: 'PENTPRO',
24: 'PENTIUM_3_M',
26: 'PENTIUM_4_M',
27: 'ITANIUM_2',
28: 'XEON_MP',
40: 'PENTIUM_3_XEON',
54: 'PENTII_M3',
86: 'PENTII_M5',
103: 'CELERON',
119: 'CELERON_MOBILE',
132: '486SX',
2147483648 + 0: 'INTEL_MODEL_ALL',
2147483648 + 3: 'X86_ALL, X86_64_ALL, I386_ALL, or 386',
2147483648 + 4: 'X86_ARCH1 or 486',
2147483648 + 5: '586 or PENT',
2147483648 + 8: 'X86_64_H or PENTIUM_3',
2147483648 + 9: 'PENTIUM_M',
2147483648 + 10: 'PENTIUM_4',
2147483648 + 11: 'ITANIUM',
2147483648 + 12: 'XEON',
2147483648 + 15: 'INTEL_FAMILY_MAX',
2147483648 + 22: 'PENTPRO',
2147483648 + 24: 'PENTIUM_3_M',
2147483648 + 26: 'PENTIUM_4_M',
2147483648 + 27: 'ITANIUM_2',
2147483648 + 28: 'XEON_MP',
2147483648 + 40: 'PENTIUM_3_XEON',
2147483648 + 54: 'PENTII_M3',
2147483648 + 86: 'PENTII_M5',
2147483648 + 103: 'CELERON',
2147483648 + 119: 'CELERON_MOBILE',
2147483648 + 132: '486SX'
},
16777228: {
-2: 'ARM64',
-1: 'MULTIPLE',
0: 'ARM64_ALL',
1: 'ARM64_V8',
2147483648 + 0: 'ARM64_ALL',
2147483648 + 1: 'ARM64_V8'
},
16777234: {
-2: 'POWERPC64',
-1: 'MULTIPLE',
0: 'POWERPC_ALL',
1: 'POWERPC_601',
2: 'POWERPC_602',
3: 'POWERPC_603',
4: 'POWERPC_603e',
5: 'POWERPC_603ev',
6: 'POWERPC_604',
7: 'POWERPC_604e',
8: 'POWERPC_620',
9: 'POWERPC_750',
10: 'POWERPC_7400',
11: 'POWERPC_7450',
100: 'POWERPC_970',
2147483648 + 0: 'POWERPC_ALL (LIB64)',
2147483648 + 1: 'POWERPC_601 (LIB64)',
2147483648 + 2: 'POWERPC_602 (LIB64)',
2147483648 + 3: 'POWERPC_603 (LIB64)',
2147483648 + 4: 'POWERPC_603e (LIB64)',
2147483648 + 5: 'POWERPC_603ev (LIB64)',
2147483648 + 6: 'POWERPC_604 (LIB64)',
2147483648 + 7: 'POWERPC_604e (LIB64)',
2147483648 + 8: 'POWERPC_620 (LIB64)',
2147483648 + 9: 'POWERPC_750 (LIB64)',
2147483648 + 10: 'POWERPC_7400 (LIB64)',
2147483648 + 11: 'POWERPC_7450 (LIB64)',
2147483648 + 100: 'POWERPC_970 (LIB64)'
}
}
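# Example lookups (illustrative, not part of the original module): the outer key
# is the Mach-O CPU type, the inner key the CPU subtype.
#   cputypes[7][3]        -> 'X86_ALL, X86_64_ALL, I386_ALL, or 386'
#   cputypes[16777228][0] -> 'ARM64_ALL'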
| apache-2.0 | 3,063,381,205,710,470,000 | 22.912963 | 79 | 0.521567 | false | 2.603427 | false | false | false |
Multiscale-Genomics/mg-process-fastq | tadbit_model_wrapper.py | 1 | 10041 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
.. See the NOTICE file distributed with this work for additional information
regarding copyright ownership.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from __future__ import print_function
import os.path
import argparse
import sys
import tarfile
import multiprocessing
import json
import shutil
try:
from urllib2 import urlopen
except ImportError:
from urllib.request import urlopen
from random import random
from string import ascii_letters as letters
from basic_modules.workflow import Workflow
from basic_modules.metadata import Metadata
from utils import logger
from utils import remap
from tool.common import CommandLineParser
from tool.common import format_utils
from tool.tb_model import tbModelTool
if '/opt/COMPSs/Bindings/python' in sys.path:
sys.path.pop(sys.path.index('/opt/COMPSs/Bindings/python'))
# ------------------------------------------------------------------------------
class tadbit_model(Workflow): # pylint: disable=invalid-name,too-few-public-methods
"""
    Wrapper for the VRE form of the TADbit model.
    It has two main sections:
    - looks for optimal parameters for modeling a region
    - models a region for a given set of optimal parameters
"""
configuration = {}
def __init__(self, configuration=None):
"""
Initialise the tool with its configuration.
Parameters
----------
configuration : dict
a dictionary containing parameters that define how the operation
should be carried out, which are specific to each Tool.
"""
tool_extra_config = json.load(open(os.path.dirname(
os.path.abspath(__file__))+'/tadbit_wrappers_config.json'))
os.environ["PATH"] += os.pathsep + format_utils.convert_from_unicode(
tool_extra_config["bin_path"])
if configuration is None:
configuration = {}
self.configuration.update(format_utils.convert_from_unicode(configuration))
# Number of cores available
num_cores = multiprocessing.cpu_count()
self.configuration["ncpus"] = num_cores
tmp_name = ''.join([letters[int(random()*52)]for _ in range(5)])
if 'execution' in self.configuration:
self.configuration['project'] = self.configuration['execution']
self.configuration['workdir'] = self.configuration['project']+'/_tmp_tadbit_'+tmp_name
if not os.path.exists(self.configuration['workdir']):
os.makedirs(self.configuration['workdir'])
self.configuration["optimize_only"] = "generation:num_mod_comp" not in self.configuration
if "optimization:max_dist" in self.configuration and \
not self.configuration["optimize_only"]:
del self.configuration["optimization:max_dist"]
del self.configuration["optimization:upper_bound"]
del self.configuration["optimization:lower_bound"]
del self.configuration["optimization:cutoff"]
self.configuration.update(
{(key.split(':'))[-1]: val for key, val in self.configuration.items()}
)
if self.configuration["gen_pos_chrom_name"] == 'all':
self.configuration["gen_pos_chrom_name"] = ""
self.configuration["gen_pos_begin"] = ""
self.configuration["gen_pos_end"] = ""
if "gen_pos_begin" not in self.configuration:
self.configuration["gen_pos_begin"] = ""
if "gen_pos_end" not in self.configuration:
self.configuration["gen_pos_end"] = ""
def run(self, input_files, metadata, output_files):
"""
Parameters
----------
        input_files : dict
            Dict of input file locations, keyed by role
            (here "hic_contacts_matrix_norm")
        metadata : dict
            Required metadata for the input files
        output_files : list
            List of output file locations
        Returns
        -------
        output_files, output_metadata : dict, dict
            Locations and metadata of the generated output files
"""
logger.info(
"PROCESS MODEL - FILES PASSED TO TOOLS: {0}".format(
str(input_files["hic_contacts_matrix_norm"]))
)
m_results_meta = {}
m_results_files = {}
if "norm" in metadata['hic_contacts_matrix_norm'].meta_data:
if metadata['hic_contacts_matrix_norm'].meta_data["norm"] != 'norm':
clean_temps(self.configuration['workdir'])
logger.fatal("Only normalized matrices can be used to build 3D models.\nExiting")
raise ValueError('Missing normalized input matrix.')
input_metadata = remap(self.configuration,
"optimize_only", "gen_pos_chrom_name", "resolution", "gen_pos_begin",
"gen_pos_end", "max_dist", "upper_bound", "lower_bound", "cutoff",
"workdir", "project", "ncpus")
in_files = [format_utils.convert_from_unicode(input_files['hic_contacts_matrix_norm'])]
input_metadata["species"] = "Unknown"
input_metadata["assembly"] = "Unknown"
if "assembly" in metadata['hic_contacts_matrix_norm'].meta_data:
input_metadata["assembly"] = metadata['hic_contacts_matrix_norm'].meta_data["assembly"]
if metadata['hic_contacts_matrix_norm'].taxon_id:
dt_json = json.load(urlopen(
"http://www.ebi.ac.uk/ena/data/taxonomy/v1/taxon/tax-id/" +
str(metadata['hic_contacts_matrix_norm'].taxon_id)))
input_metadata["species"] = dt_json['scientificName']
input_metadata["num_mod_comp"] = self.configuration["num_mod_comp"]
input_metadata["num_mod_keep"] = self.configuration["num_mod_keep"]
tm_handler = tbModelTool()
tm_files, _ = tm_handler.run(in_files, input_metadata, [])
m_results_files["modeling_stats"] = self.configuration['project']+"/model_stats.tar.gz"
tar = tarfile.open(m_results_files["modeling_stats"], "w:gz")
tar.add(tm_files[0], arcname='modeling_files_and_stats')
tar.close()
if not self.configuration["optimize_only"]:
m_results_files["tadkit_models"] = self.configuration['project'] + "/" + \
os.path.basename(tm_files[1])
os.rename(tm_files[1], m_results_files["tadkit_models"])
m_results_meta["tadkit_models"] = Metadata(
data_type="chromatin_3dmodel_ensemble",
file_type="JSON",
file_path=m_results_files["tadkit_models"],
sources=in_files,
meta_data={
"description": "Ensemble of chromatin 3D structures",
"visible": True,
"assembly": input_metadata["assembly"]
},
taxon_id=metadata['hic_contacts_matrix_norm'].taxon_id)
# List of files to get saved
logger.info("TADBIT RESULTS: " + ','.join(
[str(m_results_files[k]) for k in m_results_files]))
m_results_meta["modeling_stats"] = Metadata(
data_type="tool_statistics",
file_type="TAR",
file_path=m_results_files["modeling_stats"],
sources=in_files,
meta_data={
"description": "TADbit modeling statistics and result files",
"visible": True
})
clean_temps(self.configuration['workdir'])
return m_results_files, m_results_meta
# ------------------------------------------------------------------------------
def main(args):
"""
Main function
"""
from apps.jsonapp import JSONApp
app = JSONApp()
result = app.launch(tadbit_model,
args.config,
args.in_metadata,
args.out_metadata)
return result
def clean_temps(working_path):
"""Cleans the workspace from temporal folder and scratch files"""
for the_file in os.listdir(working_path):
file_path = os.path.join(working_path, the_file)
try:
if os.path.isfile(file_path):
os.unlink(file_path)
elif os.path.isdir(file_path):
shutil.rmtree(file_path)
except OSError:
pass
try:
os.rmdir(working_path)
except OSError:
pass
logger.info('[CLEANING] Finished')
def make_absolute_path(files, root):
"""Make paths absolute."""
for role, path in files.items():
files[role] = os.path.join(root, path)
return files
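# For example, make_absolute_path({'in': 'data/matrix.tsv'}, '/tmp/run') returns
# {'in': '/tmp/run/data/matrix.tsv'}; note the dict is also updated in place.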
# ------------------------------------------------------------------------------
if __name__ == "__main__":
sys._run_from_cmdl = True # pylint: disable=protected-access
# Set up the command line parameters
PARSER = argparse.ArgumentParser(description="TADbit map")
# Config file
PARSER.add_argument("--config", help="Configuration JSON file",
type=CommandLineParser.valid_file, metavar="config", required=True)
# Metadata
PARSER.add_argument("--in_metadata", help="Project metadata",
metavar="in_metadata", required=True)
# Output metadata
PARSER.add_argument("--out_metadata", help="Output metadata",
metavar="output_metadata", required=True)
# Log file
PARSER.add_argument("--log_file", help="Log file",
metavar="log_file", required=True)
IN_ARGS = PARSER.parse_args()
RESULTS = main(IN_ARGS)
| apache-2.0 | 7,750,510,198,224,055,000 | 35.915441 | 100 | 0.591077 | false | 4.224232 | true | false | false |
sjdv1982/seamless | seamless/communion_encode.py | 1 | 3566 | """
Encoding/decoding of communion messages
message must be a dict containing:
"mode": "request" or "response"
"id": 32-bit identifier, should increase
"content": None, bool, bytes, str, int, float, or tuple of str/int/float/bool
remaining keys: anything JSON-serializable
encoded message is binary, and consists of:
header SEAMLESS
tip: 0 for request, 1 for response
identifier: 32-bit
nrem: 32-bit, the length of the remaining keys buffer (after content)
content: is_str byte + remainder. For is_str:
0: No remainder, message is None
1: bool. remainder is 0 or 1
2: bytes. remainder is raw content
3: str. remainder is UTF-8 encoded content
4: int/float/tuple. remainder is JSON-encoded content.
rem: remaining keys buffer (JSON format)
"""
import numpy as np
import json
def communion_encode(msg):
assert msg["mode"] in ("request", "response")
m = 'SEAMLESS'.encode()
tip = b'\x00' if msg["mode"] == "request" else b'\x01'
m += tip
m += np.uint32(msg["id"]).tobytes()
remainder = msg.copy()
remainder.pop("mode")
remainder.pop("id")
remainder.pop("content")
if len(remainder.keys()):
rem = json.dumps(remainder).encode()
nrem = np.uint32(len(rem)).tobytes()
m += nrem
m += rem
else:
m += b'\x00\x00\x00\x00'
content = msg["content"]
if content is None:
m += b'\x00'
else:
assert isinstance(content, (str, int, float, bytes, bool, tuple)), content
if isinstance(content, bool):
is_str = b'\x01'
elif isinstance(content, (int, float, tuple)):
is_str = b'\x04'
else:
is_str = b'\x03' if isinstance(content, str) else b'\x02'
m += is_str
if isinstance(content, str):
content = content.encode()
elif isinstance(content, bool):
content = b'\x01' if content else b'\x00'
elif isinstance(content, (int, float, tuple)):
if isinstance(content, tuple):
for item in content:
assert item is None or isinstance(item, (str, int, float, bool)), type(item)
content = json.dumps(content).encode()
m += content
assert communion_decode(m) == msg, (communion_decode(m), msg)
return m
def communion_decode(m):
assert isinstance(m, bytes)
message = {}
head = 'SEAMLESS'.encode()
assert m[:len(head)] == head
m = m[len(head):]
tip = m[:1]
m = m[1:]
assert tip == b'\x01' or tip == b'\x00', tip
message["mode"] = "request" if tip == b'\x00' else "response"
l1, l2 = m[:4], m[4:8]
m = m[8:]
message["id"] = np.frombuffer(l1,np.uint32)[0]
nrem = np.frombuffer(l2,np.uint32)[0]
if nrem:
rem = m[:nrem]
rem = rem.decode()
rem = json.loads(rem)
message.update(rem)
m = m[nrem:]
is_str = m[:1]
if is_str == b'\x00':
content = None
elif is_str == b'\x01':
content = True if m[1:] == b'\x01' else False
elif is_str == b'\x04':
content = json.loads(m[1:])
assert isinstance(content, (int, float, list))
if isinstance(content, list):
for item in content:
assert item is None or isinstance(item, (str, int, float, bool)), type(item)
content = tuple(content)
else:
assert is_str == b'\x03' or is_str == b'\x02'
content = m[1:]
if is_str == b'\x03':
content = content.decode()
message["content"] = content
return message
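# Minimal round-trip sketch (illustrative only: the message fields below are invented,
# everything else uses the functions defined in this module).
if __name__ == "__main__":
    example = {"mode": "request", "id": 42, "content": b"some buffer", "origin": "peer-1"}
    wire = communion_encode(example)
    assert communion_decode(wire) == example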
| mit | -6,822,255,612,640,416,000 | 32.327103 | 96 | 0.576837 | false | 3.442085 | false | false | false |
wmaciel/van-crime | src/run_demo.py | 1 | 4046 | # coding=utf-8
__author__ = 'walthermaciel'
from geopy.geocoders import DataBC
from geopy.exc import GeopyError
from time import sleep
import sys
from ssl import SSLError
from create_feature_vector import create_vector
import os
import pandas as pd
from sklearn.externals import joblib
from sklearn.ensemble import RandomForestClassifier
from sklearn.ensemble import RandomForestRegressor
crime_id = {0: 'BNE Residential ',
1: 'Theft from Vehicle',
2: 'Other Thefts ',
3: 'Mischief ',
4: 'Theft of Vehicle ',
5: 'BNE Commercial '}
def gather_time():
print 'Year:\t',
year = sys.stdin.readline().strip()
month_ok = False
while not month_ok:
print 'Month:\t',
month = sys.stdin.readline().strip()
if 12 >= int(month) > 0:
month_ok = True
else:
print 'Nice try, champ...'
return int(year), int(month)
def gather_address():
print 'Street Number:\t',
st_num = sys.stdin.readline().strip()
print 'Street Name:\t',
st_name = sys.stdin.readline().strip()
address = st_num + ' ' + st_name + ', Vancouver, BC, Canada'
return address
def gather_lat_long(address):
print 'Researching lat long for ' + address + '...'
got_it = False
delay = 1
while not got_it:
if delay > 10:
print 'could not find address, exiting...'
exit()
try:
sleep(delay)
location = geolocator.geocode(address)
got_it = True
except (GeopyError, SSLError) as e:
delay *= 2
got_it = False
print '!!! Are you sure you got the right address? Trying again...'
print 'Got it!'
latitude = "{:.8f}".format(location.latitude)
longitude = "{:.8f}".format(location.longitude)
print 'LatLong:\t( ' + latitude + ', ' + longitude + ' )'
return location.latitude, location.longitude
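# gather_lat_long() backs off exponentially between geocoder attempts (1s, 2s, 4s, 8s)
# and exits the script once the delay would exceed 10 seconds.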
def run_demo():
os.system('clear')
print '''
888 888 .d8888b. d8b
888 888 d88P Y88b Y8P
888 888 888 888
Y88b d88P 8888b. 88888b. 888 888d888 888 88888b.d88b. .d88b.
Y88b d88P "88b 888 "88b 888 888P" 888 888 "888 "88b d8P Y8b
Y88o88P .d888888 888 888 888 888 888 888 888 888 888 88888888
Y888P 888 888 888 888 Y88b d88P 888 888 888 888 888 Y8b.
Y8P "Y888888 888 888 "Y8888P" 888 888 888 888 888 "Y8888
------------------ https://github.com/wmaciel/van-crime -----------------
'''
year, month = gather_time()
address = gather_address()
latitude, longitude = gather_lat_long(address)
print 'Generating feature vector...',
f_vec = create_vector(int(year), int(month), latitude, longitude)
if isinstance(f_vec, int):
print 'Failed'
else:
print 'OK'
print 'Loading classification model...',
clf = joblib.load('../models/random_forest_model.p')
print 'OK'
print 'Loading regression model...',
reg = joblib.load('../models/RandomForestRegressor.p')
print 'OK'
print '\n\n----- Results -----'
print 'Probability of crime type, given that a crime happened:'
prob_list = clf.predict_proba(f_vec.as_matrix())[0]
for i, p in enumerate(prob_list):
print crime_id[i] + '\t' + "{:.2f}".format(p * 100) + '%'
print '--------------------------\n'
print 'Expected number of crimes to happen:'
expected = reg.predict(f_vec.as_matrix())[0]
print expected
print '--------------------------\n'
print 'Expected number of crimes to happen by type:'
for i, p in enumerate(prob_list):
print crime_id[i] + '\t' + "{:.2f}".format(p * expected)
if __name__ == '__main__':
geolocator = DataBC()
while True:
run_demo()
print '\npress enter to reset'
sys.stdin.readline()
| mit | 1,701,064,191,257,659,000 | 27.9 | 79 | 0.555116 | false | 3.440476 | false | false | false |
ESEGroup/Paraguai | domain/usuario/servico_crud_usuario.py | 1 | 3946 | #-*- coding: utf-8 -*-
from .usuario import Usuario
from .nivel_acesso import *
from .senha_criptografada import *
from domain.excecoes import *
from domain.email import EmailUsuarioCadastrado, EmailUsuarioAlterado, EmailUsuarioRemovido
class ServicoCRUDUsuario():
"""Essa classe modela um serviço CRUD para Usuários, que independe da
implementação do armazenamento.
:param repositorio: Objeto de RepositorioUsuario"""
def __init__(self, repositorio, servico_email):
self.repositorio = repositorio
self.servico_email = servico_email
def criar(self, dados):
"""Cria um Usuário. Implementa o UC12 (Adicionar Usuário).
:param dados: Objeto de DTOUsuario com os dados a serem inseridos."""
escolha = {
0: UsuarioComum(),
1: SistemaManutencao(),
2: Administrador(),
}
try:
nivelAcesso = escolha[dados.nivelAcesso]
except KeyError:
raise ExcecaoNivelAcessoInvalido
senhaCriptografada = SenhaCriptografada(dados.senha)
usuario = Usuario(dados.nome, dados.email, senhaCriptografada, nivelAcesso)
if self.repositorio.obter_por_email(dados.email):
raise ExcecaoUsuarioJaExistente
usuario = self.repositorio.inserir(usuario)
email = EmailUsuarioCadastrado(usuario, dados.senha)
self.servico_email.enviar(usuario.email, email)
return usuario
def alterar(self, _id, dados):
"""Atualiza os dados de um Usuário. Implementa o UC13 (Alterar Usuário).
:param _id: Número inteiro que representa o ID do Usuário desejado.
:param dados: Objeto de DTOUsuario com os dados a serem inseridos."""
usuario = self.repositorio.obter(_id)
if not usuario:
raise ExcecaoUsuarioInexistente
        # User that currently owns the e-mail address we want to change to
usuarioDoEmail = self.repositorio.obter_por_email(dados.email)
if usuarioDoEmail and usuarioDoEmail.id != _id:
raise ExcecaoUsuarioJaExistente
escolha = {
0: UsuarioComum(),
1: SistemaManutencao(),
2: Administrador(),
}
try:
usuario.nivelAcesso = escolha[dados.nivelAcesso]
except KeyError:
raise ExcecaoNivelAcessoInvalido
usuario.nome = dados.nome
usuario.email = dados.email
if dados.senha:
usuario.senhaCriptografada = SenhaCriptografada(dados.senha)
self.repositorio.atualizar(usuario)
email = EmailUsuarioAlterado(usuario)
self.servico_email.enviar(usuario.email, email)
return usuario
def listar(self):
"""Lista todos os Usuários, retornando uma lista de objetos de Usuario.
Implementa parte do UC04 (Buscar Usuário)."""
return self.repositorio.listar()
def obter(self, _id):
"""Busca pelo Usuário de um ID fornecido e o retorna. Implementa
parte do UC04 (Buscar Usuário).
:param _id: Número inteiro que representa o ID do Usuário desejado."""
usuario = self.repositorio.obter(_id)
if not usuario:
raise ExcecaoUsuarioInexistente
return usuario
def remover(self, _id):
"""Remove o Usuário que possui o ID fornecido e o retorna, além de
cancelar todos os seus Agendamentos. Implementa o UCXXX (Remover Usuário).
:param _id: Número inteiro que representa o ID do Usuário desejado."""
        # TODO: look up the Agendamentos associated with the User with id _id
usuario = self.repositorio.obter(_id)
if not usuario:
raise ExcecaoUsuarioInexistente
email = EmailUsuarioRemovido(usuario)
self.servico_email.enviar(usuario.email, email)
        # TODO: cancel every Agendamento in the list
return (self.repositorio.remover(_id), True)
| apache-2.0 | 3,734,639,932,427,047,000 | 30.894309 | 91 | 0.653581 | false | 2.910237 | false | false | false |
lotharwissler/bioinformatics | python/gff/droso-chromosome-reconstruction.py | 1 | 2939 | #!/usr/bin/python
import os, sys # low level handling, such as command line stuff
import string # string methods available
import getopt # command line argument handling
import re # regular expressions (needed for re.search below)
from collections import defaultdict
from low import * # custom functions, written by myself
# =============================================================================
def show_help( ):
""" displays the program parameter list and usage information """
print >> sys.stderr, "usage: " + sys.argv[0] + " -d <gff-folder>"
stdout( " option description" )
stdout( " -h help (this text here)" )
stdout( " -d folder with gff files to parse" )
stdout( " " )
sys.exit(1)
# =============================================================================
def handle_arguments():
""" verifies the presence of all necessary arguments and returns the data dir """
if len ( sys.argv ) == 1:
stderr( "no arguments provided." )
show_help()
try: # check for the right arguments
keys, values = getopt.getopt( sys.argv[1:], "hd:" )
except getopt.GetoptError:
stderr( "invalid arguments provided." )
show_help()
args = {}
for key, value in keys:
if key == '-d': args['dir'] = value
if not args.has_key('dir'):
print >> sys.stderr, "gff dir argument missing."
show_help()
elif not dir_exists( args.get('dir') ):
print >> sys.stderr, "gff dir does not exist."
show_help()
if not args['dir'].endswith("/"): args['dir'] += '/'
return args
# =============================================================================
# === MAIN ====================================================================
# =============================================================================
def main( args ):
def process_gff_line(line, species):
if line.startswith("#") or len(line.rstrip()) == 0: return
columns = line.rstrip().split("\t")
if len(columns) != 9: return
type = columns[2]
if type != "gene": return
chr, start, stop, strand, descr = columns[0], columns[3], columns[4], columns[6], columns[8]
id = re.search("ID=([^;]+);", descr).group(1)
sys.stdout.write(species + "\t" + id + "\t")
print string.join([chr, start, stop, strand], "\t")
# =============================================================================
for filename in os.listdir(args['dir']):
gzip = 0
if not filename.endswith(".gff") and not filename.endswith(".gff.gz"): continue
species = filename[:filename.index("-")]
filename = args['dir'] + filename
if filename.endswith(".gff.gz"): gzip = 1
if gzip:
os.system("gunzip " + filename)
filename = filename[:-3]
fo = open(filename)
for line in fo: process_gff_line(line, species)
fo.close()
if gzip: os.system("gzip " + filename)
# =============================================================================
args = handle_arguments()
main( args )
| mit | 1,956,595,199,548,383,200 | 33.988095 | 96 | 0.50051 | false | 4.180654 | false | false | false |
kawamon/hue | apps/hbase/src/hbase/hbase_site.py | 2 | 3316 | #!/usr/bin/env python
# Licensed to Cloudera, Inc. under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. Cloudera, Inc. licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import errno
import logging
import os.path
import sys
from hadoop import confparse
from desktop.lib.security_util import get_components
if sys.version_info[0] > 2:
open_file = open
else:
open_file = file
LOG = logging.getLogger(__name__)
SITE_PATH = None
SITE_DICT = None
_CNF_HBASE_THRIFT_KERBEROS_PRINCIPAL = 'hbase.thrift.kerberos.principal'
_CNF_HBASE_THRIFT_SPNEGO_PRINCIPAL = 'hbase.thrift.spnego.principal'
_CNF_HBASE_AUTHENTICATION = 'hbase.security.authentication'
_CNF_HBASE_REGIONSERVER_THRIFT_FRAMED = 'hbase.regionserver.thrift.framed'
_CNF_HBASE_IMPERSONATION_ENABLED = 'hbase.thrift.support.proxyuser'
_CNF_HBASE_USE_THRIFT_HTTP = 'hbase.regionserver.thrift.http'
_CNF_HBASE_USE_THRIFT_SSL = 'hbase.thrift.ssl.enabled'
def reset():
global SITE_DICT
SITE_DICT = None
def get_conf():
if SITE_DICT is None:
_parse_site()
return SITE_DICT
def get_server_principal():
thrift_principal = get_conf().get(_CNF_HBASE_THRIFT_KERBEROS_PRINCIPAL, None)
principal = get_conf().get(_CNF_HBASE_THRIFT_SPNEGO_PRINCIPAL, thrift_principal)
components = get_components(principal)
if components is not None:
return components[0]
def get_server_authentication():
return get_conf().get(_CNF_HBASE_AUTHENTICATION, 'NOSASL').upper()
def get_thrift_transport():
use_framed = get_conf().get(_CNF_HBASE_REGIONSERVER_THRIFT_FRAMED)
if use_framed is not None:
if use_framed.upper() == "TRUE":
return "framed"
else:
return "buffered"
else:
#Avoid circular import
from hbase.conf import THRIFT_TRANSPORT
return THRIFT_TRANSPORT.get()
def is_impersonation_enabled():
#Avoid circular import
from hbase.conf import USE_DOAS
return get_conf().get(_CNF_HBASE_IMPERSONATION_ENABLED, 'FALSE').upper() == 'TRUE' or USE_DOAS.get()
def is_using_thrift_http():
#Avoid circular import
from hbase.conf import USE_DOAS
return get_conf().get(_CNF_HBASE_USE_THRIFT_HTTP, 'FALSE').upper() == 'TRUE' or USE_DOAS.get()
def is_using_thrift_ssl():
return get_conf().get(_CNF_HBASE_USE_THRIFT_SSL, 'FALSE').upper() == 'TRUE'
def _parse_site():
global SITE_DICT
global SITE_PATH
#Avoid circular import
from hbase.conf import HBASE_CONF_DIR
SITE_PATH = os.path.join(HBASE_CONF_DIR.get(), 'hbase-site.xml')
try:
data = open_file(SITE_PATH, 'r').read()
except IOError as err:
if err.errno != errno.ENOENT:
LOG.error('Cannot read from "%s": %s' % (SITE_PATH, err))
return
data = ""
SITE_DICT = confparse.ConfParse(data)
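# Minimal usage sketch (illustrative; assumes Hue is configured with a readable hbase-site.xml):
#
#   from hbase import hbase_site
#   hbase_site.reset()
#   transport = hbase_site.get_thrift_transport()              # 'framed' or 'buffered'
#   kerberized = hbase_site.get_server_authentication() == 'KERBEROS'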
| apache-2.0 | 4,130,980,423,032,617,500 | 28.607143 | 102 | 0.721653 | false | 3.090401 | false | false | false |
windelbouwman/ppci-mirror | ppci/binutils/disasm.py | 1 | 1210 | """ Contains disassembler stuff. """
from ..arch.data_instructions import DByte
class Disassembler:
""" Base disassembler for some architecture """
def __init__(self, arch):
self.arch = arch
for instruction in arch.isa.instructions:
# print(instruction, instruction.patterns)
# for nl in instruction.non_leaves:
# print(' ', nl.patterns)
pass
def disasm(self, data, outs, address=0):
""" Disassemble data into an instruction stream """
# TODO: implement this!
        # The trial-and-error method will be slow as a snail:
# for instruction in self.arch.isa.instructions:
# for size in instruction.sizes():
# part = data[:size]
# try:
# print(instruction, part, size)
# i = instruction.decode(part)
# print(i)
# except ValueError:
# pass
# For now, all is bytes!
for byte in data:
ins = DByte(byte)
ins.address = address
outs.emit(ins)
address += len(ins.encode())
def take_one(self):
pass
| bsd-2-clause | -6,070,502,765,213,653,000 | 29.25 | 62 | 0.52562 | false | 4.384058 | false | false | false |
Domatix/stock-logistics-workflow | stock_split_picking/models/stock_picking.py | 2 | 3356 | # Copyright 2013-2015 Camptocamp SA - Nicolas Bessi
# Copyright 2018 Camptocamp SA - Julien Coux
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl).
from odoo import _, api, models
from odoo.exceptions import UserError
from odoo.tools.float_utils import float_compare
class StockPicking(models.Model):
"""Adds picking split without done state."""
_inherit = "stock.picking"
@api.multi
def split_process(self):
"""Use to trigger the wizard from button with correct context"""
for picking in self:
# Check the picking state and condition before split
if picking.state == 'draft':
raise UserError(_('Mark as todo this picking please.'))
if all([x.qty_done == 0.0 for x in picking.move_line_ids]):
raise UserError(
_('You must enter done quantity in order to split your '
'picking in several ones.'))
# Split moves considering the qty_done on moves
new_moves = self.env['stock.move']
for move in picking.move_lines:
rounding = move.product_uom.rounding
qty_done = move.quantity_done
qty_initial = move.product_uom_qty
qty_diff_compare = float_compare(
qty_done, qty_initial, precision_rounding=rounding
)
if qty_diff_compare < 0:
qty_split = qty_initial - qty_done
qty_uom_split = move.product_uom._compute_quantity(
qty_split,
move.product_id.uom_id,
rounding_method='HALF-UP'
)
new_move_id = move._split(qty_uom_split)
for move_line in move.move_line_ids:
if move_line.product_qty and move_line.qty_done:
# To avoid an error
# when picking is partially available
try:
move_line.write(
{'product_uom_qty': move_line.qty_done})
except UserError:
pass
new_moves |= self.env['stock.move'].browse(new_move_id)
# If we have new moves to move, create the backorder picking
if new_moves:
backorder_picking = picking.copy({
'name': '/',
'move_lines': [],
'move_line_ids': [],
'backorder_id': picking.id,
})
picking.message_post(
_(
'The backorder <a href="#" '
'data-oe-model="stock.picking" '
'data-oe-id="%d">%s</a> has been created.'
) % (
backorder_picking.id,
backorder_picking.name
)
)
new_moves.write({
'picking_id': backorder_picking.id,
})
new_moves.mapped('move_line_ids').write({
'picking_id': backorder_picking.id,
})
new_moves._action_assign()
| agpl-3.0 | 3,325,539,118,000,248,000 | 40.95 | 76 | 0.470203 | false | 4.801144 | false | false | false |
tjctw/PythonNote | thinkstat/install_test.py | 2 | 1432 | """This file contains code used in "Think Stats",
by Allen B. Downey, available from greenteapress.com
Copyright 2010 Allen B. Downey
License: GNU GPLv3 http://www.gnu.org/licenses/gpl.html
"""
import math
import matplotlib.pyplot as pyplot
import myplot
import Pmf
def NormalPdf(x):
"""Computes the PDF of x in the standard normal distribution."""
return math.exp(-x**2/2) / math.sqrt(2 * math.pi)
def Linspace(start, stop, n):
"""Makes a list of n floats from start to stop.
Similar to numpy.linspace()
"""
return [start + (stop-start) * float(i)/(n-1) for i in range(n)]
def RenderPdf(mu, sigma, n=101):
"""Makes xs and ys for a normal PDF with (mu, sigma).
n: number of places to evaluate the PDF
"""
xs = Linspace(mu-4*sigma, mu+4*sigma, n)
ys = [NormalPdf((x-mu) / sigma) for x in xs]
return xs, ys
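# For example, RenderPdf(0, 1) evaluates the standard normal PDF at 101 points
# spanning [-4, 4]; main() below plots the IQ version with mu=100, sigma=15.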
def main():
xs, ys = RenderPdf(100, 15)
n = 34
pyplot.fill_between(xs[-n:], ys[-n:], y2=0.0001, color='blue', alpha=0.2)
s = 'Congratulations!\nIf you got this far,\nyou must be here.'
d = dict(shrink=0.05)
pyplot.annotate(s, [127, 0.02], xytext=[80, 0.05], arrowprops=d)
myplot.Plot(xs, ys,
clf=False,
show=True,
title='Distribution of IQ',
xlabel='IQ',
ylabel='PDF',
legend=False
)
if __name__ == "__main__":
main()
| cc0-1.0 | -2,332,128,312,569,563,000 | 23.689655 | 77 | 0.587291 | false | 3.140351 | false | false | false |
LiqunHu/MVPN | testing/testUser.py | 1 | 2284 | # -*- coding: utf-8 -*-
"""
Created on Thu May 12 16:25:02 2016
@author: huliqun
"""
import requests
import json
import uuid
import base64
_SERVER_HOST = '127.0.0.1'
_SERVER_PORT = 8000
_SERVER_BASE_URL = 'http://{0}:{1}/api/users'.format(_SERVER_HOST, _SERVER_PORT)
headers = {'content-type':'application/json'}
body = '{"username":"wahaha@qq.com","displayname":"wahaha","email":"wahaha@qq.com","password":"123456","mobile":"18698729476"}'
#resp = requests.get(_SERVER_BASE_URL)
resp = requests.post(_SERVER_BASE_URL, headers=headers,data=body)
print(resp.text)
print(resp)
headers = {'Authorization': '3161cc5a950fead158ebe803f7e56822',
'Account-ID': '111111111111111',
'content-type':'application/json'}
password = '123456'
#resp = requests.get(_SERVER_BASE_URL, headers=headers,data=body)
#print(resp.text)
#print(resp)
import pyDes
import hashlib
def md5(s):
m = hashlib.md5()
m.update(s.encode("utf-8"))
return m.digest()
# For Python3, you'll need to use bytes, i.e.:
# data = b"Please encrypt my data"
# k = pyDes.des(b"DESCRYPT", pyDes.CBC, b"\0\0\0\0\0\0\0\0", pad=None, padmode=pyDes.PAD_PKCS5)
data = str(uuid.uuid4()).replace('-','')
k = pyDes.triple_des(md5('123456'), pyDes.CBC, "\0\0\0\0\0\0\0\0", pad=None, padmode=pyDes.PAD_PKCS5)
d = base64.b64encode(k.encrypt(data)).decode()
idf = base64.b64encode(k.encrypt('wahaha@qq.com')).decode()
headers = {'content-type':'application/json'}
bodyData = {
'username':'wahaha@qq.com',
'identifyCode':idf
}
print(idf)
body = json.dumps(bodyData)
print("Encrypted: %r" % idf)
print("Decrypted: %r" % k.decrypt(base64.b64decode(idf.encode())).decode() )
_SERVER_BASE_URL = 'http://{0}:{1}/api/auth'.format(_SERVER_HOST, _SERVER_PORT)
resp = requests.get(_SERVER_BASE_URL, headers=headers,data=body)
print(resp.text)
print(resp)
headers = {'Cookie':'awesession=c7f406241bcc49209eb58a527520e051-1465822334-fe92174a8e3956edc8befc20911a0b54c8f7b2db; Domain=aaaa.com;',
'content-type':'application/json'}
_SERVER_BASE_URL = 'http://{0}:{1}/api/users'.format(_SERVER_HOST, _SERVER_PORT)
resp = requests.get(_SERVER_BASE_URL, headers=headers,data=body)
print(resp.text)
print(resp)
| gpl-3.0 | -2,363,591,250,241,619,500 | 31.101449 | 136 | 0.662434 | false | 2.646582 | false | false | false |
roshantha9/AbstractManycoreSim | src/analyse_results/AnalyseResults_Exp_HEVCSplitTiles_KaushikTuner.py | 1 | 10541 | import sys, os, csv, pprint, math
#sys.path.append(os.path.join(os.path.dirname(__file__), '..'))
## uncomment when running under CLI only version ##
#import matplotlib
#matplotlib.use('Agg')
#sys.path.append("/shared/storage/cs/staffstore/hrm506/simpy-3.0.5/")
#sys.path.append("/shared/storage/cs/staffstore/hrm506/networkx-1.10/networkx-1.10")
from collections import OrderedDict
import numpy as np
import traceback
from collections import Iterable
import re
import pylab
import random
import shutil
import math
import matplotlib
import matplotlib.pyplot as plt
plt.style.use('ggplot')
from mpl_toolkits.mplot3d import Axes3D
import scipy.stats
import scipy.optimize as scipy_optimize
import itertools
from matplotlib.colors import ListedColormap, NoNorm, rgb2hex
from matplotlib import mlab
from itertools import cycle # for automatic markers
import json
from operator import itemgetter
from scipy import stats
from collections import Counter
import multiprocessing
#from scipy.stats import gaussian_kde
import matplotlib.ticker
import matplotlib.cm as cm
import matplotlib.patches as patches
from matplotlib.font_manager import FontProperties
from SimParams import SimParams
SHOW_PLOTS = True
USE_MULTIPROCESSING = False
NOC_H = 8
NOC_W = 8
MAX_SEEDS = 30
FAILED_SEEDS = []
#RANDOM_SEEDS =[s for s in RANDOM_SEEDS if s not in FAILED_SEEDS][:MAX_SEEDS]
RANDOM_SEEDS_MISC=[33749, 43894, 26358, 80505]
RANDOM_SEEDS = RANDOM_SEEDS_MISC
print len(set(RANDOM_SEEDS))
#sys.exit()
EXP_DATADIR = "Z:/MCASim/experiment_data/hevc_tile_mapping_kaushikTuner/"
DATA_TEMP_SAVE_LOC = "../experiment_data/hevc_tile_mapping_kaushikTuner/"
CMB_ID = 912
MMP_ID = 0
#global_mp_order = [d['lbl'] for d in global_types_of_tests]
KAUSHIK_COMMS_SCALE_FACTOR = [6, 9, 12, 15, 20, 25, 30, 35, 40, 45, 50]
WORKLOAD_KEY = "WL2"
def _save_data(fname, data):
final_fname = DATA_TEMP_SAVE_LOC + fname
logfile=open(final_fname, 'w')
json_data = json.dumps(data)
logfile.write(json_data)
logfile.close()
def _load_data(fname):
final_fname = DATA_TEMP_SAVE_LOC + fname
json_data=open(final_fname)
data = json.load(json_data)
return data
def _gen_exp_key (cmb, mmp, ksf):
exp_key = "cmb"+str(cmb)+ \
"mmp"+str(mmp)+ \
"ksf"+str(ksf)
return exp_key
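# For example, _gen_exp_key(912, 0, 6) yields "cmb912mmp0ksf6", which matches the
# per-experiment folder names consumed by _get_final_fname() below.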
def _get_final_fname(fname, exp_key, wl_cfg, seed):
subdir1 = EXP_DATADIR + wl_cfg + "/" + exp_key + "/"
subdir2 = subdir1 + "seed_"+str(seed)+"/"
fname_prefix = "HEVCTileSplitTest__" + exp_key + "_" + str(NOC_H)+"_"+str(NOC_W)+"_"
finalfname_completedtasks = subdir2 + fname_prefix + fname
return finalfname_completedtasks
def _normalise_list(lst, norm_min=None, norm_max=None):
if norm_max == None:
norm_max = np.max(lst)
if norm_min == None:
norm_min = np.min(lst)
new_list = []
for each_l in lst:
x = each_l
norm_val = (x-norm_min)/(norm_max-norm_min)
new_list.append(norm_val)
return new_list
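# For example, _normalise_list([2, 4, 6]) returns [0.0, 0.5, 1.0].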
def boxplot_colorize(bp, param_col, fc='#B8DCE6'):
i=0
## change outline color, fill color and linewidth of the boxes
for box in bp['boxes']:
# change outline color
box.set( color='#000000', linewidth=1)
# change fill color
box.set( facecolor = param_col)
i+=1
## change color and linewidth of the whiskers
for whisker in bp['whiskers']:
whisker.set(color='#000000', linewidth=1, linestyle='-')
## change color and linewidth of the caps
for cap in bp['caps']:
cap.set(color='#000000', linewidth=1)
## change color and linewidth of the medians
for median in bp['medians']:
median.set(color='#000000', linewidth=1)
## change the style of fliers and their fill
for flier in bp['fliers']:
flier.set(marker='x', color='red', alpha=0.5)
def plot_CommsOverhead_and_GoPLateness_Combined(load_data=False, show_plots=False):
data_fname_comms = "plot_comms.json"
data_fname_gopl = "plot_gopl.json"
### get data ####
alldata_perseed_commsoverhead = OrderedDict()
alldata_perseed_goplateness = OrderedDict()
if load_data==True:
alldata_perseed_commsoverhead = _load_data(data_fname_comms)
alldata_perseed_goplateness = _load_data(data_fname_gopl)
else:
for each_ksf in KAUSHIK_COMMS_SCALE_FACTOR:
alldata_perseed_commsoverhead[each_ksf] = None
alldata_perseed_goplateness[each_ksf] = None
# which exp condition ?
exp_key = _gen_exp_key(
CMB_ID,
MMP_ID,
each_ksf
)
exp_lbl = each_ksf
each_seed_data_comms = []
each_seed_data_goplateness = []
for each_seed in RANDOM_SEEDS:
# get filename
finalfname_comms = _get_final_fname("_flwcompletedshort.js", exp_key, WORKLOAD_KEY, each_seed)
finalfname_gopsummary = _get_final_fname("_gopsopbuffsumm.js", exp_key, WORKLOAD_KEY, each_seed)
try:
print "getting : ", finalfname_comms
## get file data
json_data=open(finalfname_comms)
file_data = json.load(json_data)
flows_bl = [f[0] for f in file_data['flows_completed'] if f[2] in [1,15]]
flows_payload = [_get_payload_from_flowbl(bl) for bl in flows_bl]
flows_bl_sum = np.sum(flows_payload)
# save
each_seed_data_comms.append(flows_bl_sum)
print "getting : ", finalfname_gopsummary
## get file data
json_data=open(finalfname_gopsummary)
file_data = json.load(json_data)
gop_lateness_dist = [g['gop_execution_lateness'] for gid, g in file_data.iteritems()]
# save
each_seed_data_goplateness.extend(gop_lateness_dist)
except Exception, e:
tb = traceback.format_exc()
print tb
sys.exit(e)
alldata_perseed_commsoverhead[each_ksf] = each_seed_data_comms
alldata_perseed_goplateness[each_ksf] = each_seed_data_goplateness
# save data
if load_data==False:
_save_data(data_fname_comms, alldata_perseed_commsoverhead)
_save_data(data_fname_gopl, alldata_perseed_goplateness)
if show_plots==False:
return
### plot data ####
fig, ax1 = plt.subplots()
fig.canvas.set_window_title('plot_GopL_CommsOvh_Combined')
ydata_comms = [np.mean(alldata_perseed_commsoverhead[str(k)]) for k in KAUSHIK_COMMS_SCALE_FACTOR]
ydata_gopl = [alldata_perseed_goplateness[str(k)] for k in KAUSHIK_COMMS_SCALE_FACTOR]
xdata = np.arange(len(KAUSHIK_COMMS_SCALE_FACTOR))
ax1.boxplot(ydata_gopl, positions=xdata)
ax1.set_ylabel('GoPLateness')
ax2 = ax1.twinx()
ax2.plot(xdata, ydata_comms, 'r-', linewidth=2)
ax2.set_ylabel('CommOverhead', color='r')
print "---"
plt.grid(axis='y',b=True, which='major', color='k', linestyle='--', alpha=0.3)
plt.grid(axis='y',b=True, which='minor', color='k', linestyle='-', alpha=0.2)
plt.minorticks_on()
ax1.tick_params(axis = 'y', which = 'both')
ax2.tick_params(axis = 'y', which = 'both')
#plt.ticklabel_format(style='sci', axis='x', scilimits=(0,0), labelsize=20)
#plt.tick_params(axis='both', which='major', labelsize=16)
#plt.tick_params(axis='both', which='minor', labelsize=16)
#plt.rc('font', **{'size':'16'})
#ax.set_xticks(ind+0.5)
ax1.set_xticks(xdata)
ax1.set_xticklabels(KAUSHIK_COMMS_SCALE_FACTOR, rotation=40)
def _get_payload_from_flowbl(flw_bl):
p = SimParams.NOC_PERIOD
payload = (16.0*(flw_bl - (70.0*p*p)))/p
return payload
def _write_formatted_file(fname, data, format):
if(format == "pretty"):
logfile=open(fname, 'w')
        pprint.pprint(data, logfile, width=128)  # 'pprint' is imported as a module above
elif(format == "json"):
logfile=open(fname, 'w')
json_data = json.dumps(data)
logfile.write(json_data)
else:
logfile=open(fname, 'w')
        pprint.pprint(data, logfile, width=128)
def func_fit_data(x, a, b, c):
return a * np.exp(-b * x) + c
###################################
# HELPERS
###################################
###################################
# MAIN
###################################
if __name__ == "__main__":
plot_CommsOverhead_and_GoPLateness_Combined(load_data=True, show_plots=True)
plt.show()
class LogFormatterTeXExponent(pylab.LogFormatter, object):
"""Extends pylab.LogFormatter to use
tex notation for tick labels."""
def __init__(self, *args, **kwargs):
super(LogFormatterTeXExponent,
self).__init__(*args, **kwargs)
def __call__(self, *args, **kwargs):
"""Wrap call to parent class with
change to tex notation."""
label = super(LogFormatterTeXExponent,
self).__call__(*args, **kwargs)
label = re.sub(r'e(\S)0?(\d+)',
r'\\times 10^{\1\2}',
str(label))
label = "$" + label + "$"
return label
| gpl-3.0 | -5,360,821,009,173,282,000 | 29.731778 | 132 | 0.533725 | false | 3.567174 | false | false | false |
jimbelton/wikidata | lib/language.py | 1 | 19199 | # -*- coding: utf-8 -*-
# All language names have had the words characters, languages and language removed and all remaining words capitalized
# 1. The list of languages in ISO 639-1 is from the standard: http://www.loc.gov/standards/iso639-2/
# 2. The list of languages in wikidata was taken from the 'original language of work' properties of all books in the dump from
# 20160215, and may not be complete.
# These are the primary language names and their codes from ISO 639-1, or in rare cases, from ISO 639-2
#
nameToIso639Id = {
"Abkhazian": "ab",
"Afar": "aa",
"Afrikaans": "af", # Found in wikidata
"Akan": "ak",
"Albanian": "sq",
"Algonquian": "alg", # No similar language in ISO 639-1
"Amharic": "am", # Found in wikidata
"Ancient Greek": "grc", # Found in wikidata
"Arabic": "ar", # Found in wikidata
"Aragonese": "an", # Found in wikidata
"Aramaic": "arc", # Found in wikidata
"Armenian": "hy", # Found in wikidata
"Assamese": "as", # Found in wikidata
"Avaric": "av",
"Avestan": "ae",
"Awadhi": "awa", # Found in wikidata
"Aymara": "ay",
"Azerbaijani": "az", # Found in wikidata
"Bambara": "bm",
"Bantu": "bnt", # No similar language in ISO 639-1
"Bashkir": "ba",
"Basque": "eu", # Found in wikidata
"Belarusian": "be", # Found in wikidata
"Bengali": "bn", # Found in wikidata
"Berber": "ber", # No similar language in ISO 639-1
"Bihari": "bh",
"Bislama": "bi",
"Bosnian": "bs", # Found in wikidata
"Breton": "br", # Found in wikidata
"Bulgarian": "bg", # Found in wikidata
"Burmese": "my", # Found in wikidata
"Catalan": "ca", # Found in wikidata
"Central Khmer": "km",
"Chamorro": "ch",
"Chechen": "ce",
"Chichewa": "ny",
"Chinese": "zh", # Found in wikidata
"Chuvash": "cv",
"Cornish": "kw",
"Corsican": "co",
"Cree": "cr",
"Croatian": "hr", # Found in wikidata
"Czech": "cs", # Found in wikidata
"Danish": "da", # Found in wikidata
"Divehi": "dv",
"Dutch": "nl", # Found in wikidata
"Dzongkha": "dz",
"English": "en",
"Esperanto": "eo", # Found in wikidata
"Estonian": "et", # Found in wikidata
"Ewe": "ee",
"Faroese": "fo", # Found in wikidata
"Fijian": "fj",
"Filipino": "fil", # Found in wikidata
"Finnish": "fi", # Found in wikidata
"French": "fr", # Found in wikidata
"Fulah": "ff",
"Galician": "gl", # Found in wikidata
"Ganda": "lg",
"Georgian": "ka", # Found in wikidata
"German": "de", # Found in wikidata
"Greek": "el", # Found in wikidata
"Guaraní": "gn",
"Gujarati": "gu", # Found in wikidata
"Haitian": "ht",
"Hausa": "ha",
"Hawaiian": "haw", # No similar language in ISO 639-1
"Hebrew": "he", # Found in wikidata
"Herero": "hz",
"Hindi": "hi", # Found in wikidata
"Hiri Motu": "ho",
"Hmong": "hmn", # Found in wikidata
"Hungarian": "hu", # Found in wikidata
"Icelandic": "is", # Found in wikidata
"Ido": "io",
"Igbo": "ig",
"Indonesian": "id", # Found in wikidata
"Interlingua": "ia",
"Interlingue": "ie",
"Inupiaq": "ik",
"Irish": "ga", # Found in wikidata
"Italian": "it", # Found in wikidata
"Inuktitut": "iu",
"Japanese": "ja", # Found in wikidata
"Javanese": "jv",
"Judeo-Arabic": "jrb", # Found in wikidata
"Kalaallisut": "kl",
"Kannada": "kn", # Found in wikidata
"Kanuri": "kr",
"Karelian": "krl", # Found in wikidata
"Kashubian": "csb", # Found in wikidata
"Kashmiri": "ks",
"Kazakh": "kk",
"Kikuyu": "ki",
"Kinyarwanda": "rw",
"Kirundi": "rn",
"Komi": "kv",
"Kongo": "kg",
"Konkani": "kok", # Found in wikidata
"Korean": "ko", # Found in wikidata
"Kurdish": "ku",
"Kwanyama": "kj",
"Kyrgyz": "ky",
"Ladino": "lad", # Found in wikidata
"Latin": "la", # Found in wikidata
"Latvian": "lv",
"Luxembourgish": "lb",
"Limburgish": "li",
"Lingala": "ln",
"Lao": "lo",
"Lithuanian": "lt", # Found in wikidata
"Luba-Katanga": "lu",
"Manx": "gv",
"Macedonian": "mk", # Found in wikidata
"Malagasy": "mg",
"Malay": "ms", # Found in wikidata
"Malayalam": "ml", # Found in wikidata
"Maltese": "mt",
"Manx": "gv",
"Maori": "mi",
"Marathi": "mr", # Found in wikidata
"Marshallese": "mh",
"Mayan": "myn", # No similar language in ISO 639-1
"Mongolian": "mn",
"Nahuatl": "nah", # Found in wikidata. No similar language in ISO 639-1
"Nauru": "na",
"Navajo": "nv",
"Ndonga": "ng",
"Neapolitan": "nap", # Found in wikidata
"Nepali": "ne", # Found in wikidata
"North Ndebele": "nd",
"Northern Sami": "se",
"Norwegian": "no", # Found in wikidata
"Norwegian Bokmål": "nb",
"Norwegian Nynorsk": "nn",
"Nuosu": "ii",
"Southern Ndebele": "nr",
"Occitan": "oc", # Found in wikidata
"Ojibwe": "oj",
"Old Church Slavonic": "cu", # Found in wikidata
"Old Norse": "non", # Found in wikidata
"Oriya": "or",
"Oromo": "om",
"Ossetian": "os",
"Pali": "pi", # Found in wikidata
"Pashto": "ps",
"Persian": "fa", # Found in wikidata
"Polish": "pl", # Found in wikidata
"Portuguese": "pt", # Found in wikidata
"Prakrit": "pra", # Found in wikidata
"Punjabi": "pa", # Found in wikidata
"Quechua": "qu",
"Romansh": "rm", # Found in wikidata
"Romanian": "ro", # Found in wikidata
"Rundi": "rn",
"Russian": "ru", # Found in wikidata
"Samoan": "sm",
"Sango": "sg",
"Sanskrit": "sa", # Found in wikidata
"Sardinian": "sc",
"Scottish Gaelic": "gd", # Found in wikidata
"Serbian": "sr", # Found in wikidata
"Shona": "sn",
"Sicilian": "scn", # Found in wikidata
"Sindhi": "sd", # Found in wikidata
"Sinhala": "si", # Found in wikidata
"Slovak": "sk", # Found in wikidata
"Slovenian": "sl", # Found in wikidata
"Somali": "so",
"South Ndebele": "nr",
"Southern Sotho": "st",
"Spanish": "es", # Found in wikidata
"Sundanese": "su",
"Swahili": "sw",
"Swati": "ss",
"Swedish": "sv", # Found in wikidata
"Tahitian": "ty",
"Tajik": "tg",
"Tamil": "ta", # Found in wikidata
"Tatar": "tt",
"Telugu": "te", # Found in wikidata
"Tajik": "tg",
"Thai": "th", # Found in wikidata
"Tigrinya": "ti",
"Tibetan": "bo",
"Tswana": "tn",
"Tonga": "to",
"Tsonga": "ts",
"Tswana": "tn",
"Turkish": "tr", # Found in wikidata
"Turkmen": "tk",
"Tsonga": "ts",
"Tatar": "tt",
"Twi": "tw",
"Tahitian": "ty",
"Uyghur": "ug",
"Ukrainian": "uk", # Found in wikidata
"Urdu": "ur", # Found in wikidata
"Uto-Aztecan": "azc", # No similar language in ISO 639-1
"Uzbek": "uz",
"Venda": "ve",
"Vietnamese": "vi", # Found in wikidata
"Volapük": "vo",
"Walloon": "wa",
"Welsh": "cy", # Found in wikidata
"Wolof": "wo",
"Western Frisian": "fy",
"Xhosa": "xh",
"Yiddish": "yi", # Found in wikidata
"Yoruba": "yo",
"Zhuang": "za",
"Zulu": "zu"
}
# These are aliases from ISO 639-1 and wikidata, and the closest ISO 639-1 codes (or, in a few cases, 639-2 codes). The wikidata
# language aliases have been forced to the nearest 639-1 code whereever possible, with comments indicating that there is a better
# fit in 639-2 if that is the case. This was done to keep the number of lanaguage codes to a minimum, but it means (for example)
# that all Aryan languages map to "hi" (Hindi)
#
aliasToIso639Id = {
"American English": "en", # From wikidata
"Australian English": "en", # From wikidata
"Austrian German": "de", # From wikidata
"Bahasa Melayu Sabah": "ms", # From wikidata
"Bangla": "bn",
"Bhojpuri": "bh",
u"Bokm\u00e5l": "nb", # From wikidata
"Brazil": "pt", # From wikidata
"Brazil Portuguese": "pt", # From wikidata
"Brazilian Portuguese": "pt", # From wikidata
"British English": "en", # From wikidata
"Burgundian": "de", # From wikidata: Similar to Gothic, which has its own code in ISO 639-2, "got"
"Canadian English": "en", # From wikidata
"Castilian": "es",
u"Catal\u00e1n": "ca", # From wikidata
"Central": "bo",
"Chewa": "ny",
"Chuang": "za",
"Church Slavic": "cu",
"Church Slavonic": "cu", # From ISO 639-1, found in wikidata
"Classical Armenian": "hy", # From wikidata
"Classical Chinese": "zh", # From wikidata
"Classical Nahuatl": "nah", # From wikidata
"Common Brittonic": "br", # From wikidata
"Dhivehi": "dv",
"Early Modern English": "en", # From wikidata
"Early Modern Spanish": "es", # From wikidata
"Early New High German": "de", # From wikidata
"Egyptian Arabic": "ar", # From wikidata
"Farsi": "fa",
"Tagalog": "fil", # Found in wikidata. Intentionally categorized as Filipino, not "tl" (Tagalog).
"Flemish": "nl", # From ISO 639-2, found in wikidata
"France": "fr", # From wikidata
"Fulah": "ff",
"Gaelic": "gd",
"Geordie Dialect": "en", # From wikidata
"Gikuyu": "ki",
"Greenlandic": "kl",
"Haitian Creole": "ht",
"Hawaiian Pidgin": "haw", # From wikidata
"Hiberno-English": "en", # From wikidata
"Hopi": "azc", # From wikidata. Not in ISO 639-1. An Uto-Aztecan language: "azc" in ISO 639-5
"Indian English": "en", # From wikidata
"Italiano Moderno": "it", # From wikidata
"Jamaican Patois": "en", # From wikidata. No similar in ISO 639-1. English based creole: "cpe" in ISO 639-2
"Kalaallisut": "kl",
"Kanbun": "ja", # From wikidata. Annotated Classical Chinese that can be read in Japanese
"Katharevousa": "el", # From wikidata
"Kerewe": "bnt", # From wikidata. No similar in ISO 639-1. A Bantu language: "bnt" in ISO 639-2
"Khmer": "km", # From in wikidata
"Kirghiz": "ky",
"Koine Greek": "el", # From wikidata
"Kuanyama": "kj",
"Late Old Japanese": "ja", # From wikidata
"Letzeburgesch": "lb",
"Limburgan": "li",
"Limburger": "li",
"Luganda": "lg",
"Magahi": "bh",
"Maghrebi Arabic": "ar", # From wikidata
"Maithili": "bh", # From ISO 639-1. Found in wikidata
"Malaysian": "ms", # From wikidata
"Malay Trade And Creole": "ms", # From wikidata
"Maldivian": "dv",
"Mandarin Chinese": "zh", # From wikidata
"Manglish": "en", # From wikidata
"Massachusett": "alg", # From wikidata. No similar in ISO 639-1. An Algonquian language: "alg" ISO 639-2
"Medieval Latin": "la", # From wikidata
"Middle English": "en", # From wikidata. Middle English has its own code in ISO 639-2, "enm"
"Middle French": "fr", # From wikidata. Middle French has its own code in ISO 639-2, "frm"
"Mittelalterliches Aragonesisch": "an", # From wikidata
"Modern Greek": "el", # From wikidata
"Moldavian": "ro",
"Moldovan": "ro",
"Mon": "km", # From wikidata. Mon-Khnmer languages have there own code in ISO 639-2, "mkh"
"Navaho": "nv",
"Netherlands": "nl", # From wikidata
"Nigerian Pidgin": "en", # From wikidata. No similar in ISO 639-1. English based creole: "cpe" in ISO 639-2
"Nyanja": "ny",
"Nynorsk": "nn", # From wikidata
"Occidental": "ie",
"Odia": "hi", # From wikidata
"Ojibwa": "oj",
"Old Bulgarian": "cu",
"Old Chinese": "zh", # From wikidata
"Old East Slavic": "cu", # From wikidata
"Old French": "fr", # From wikidata. Old French has its own code in ISO 639-2, "fro"
"Old Slavonic": "cu",
"Old Spanish": "es", # From wikidata
"Ossetic": "os",
"Panjabi": "pa",
"Philippine English": "en", # From wikidata
"Pulaar": "ff",
"Pular": "ff",
"Pushto": "ps",
"Quebec French": "fr", # From wikidata
u"Radical Bokm\u00e5l": "nb", # From wikidata
"Ruthenian": "cu", # From wikidata
"Scots": "gd", # From wikidata
"Scottish English": "en", # From wikidata
"Serbo-Croatian": "sr", # From wikidata
"Shan": "th", # From wikidata. Tai languages have there own code in ISO 639-2, "tai"
"Sichuan Yi": "ii",
"Sinhalese": "si",
"Slovene": "sl", # From in wikidata
"Spanish In The Philippines": "es", # From wikidata
"Standard Chinese": "zh", # From wikidata
"Taglish": "fil", # From wikidata. Tagalog using some English words
"Tuareg": "ber", # From wikidata. No similar in ISO 639-1. A Berber language: "ber" from ISO 639-2
"Tibetan Standard": "bo",
"Traditional Chinese": "zh", # From wikidata
"Uighur": "ug",
"Valencian": "ca", # From wikidata
"Western Armenian": "hy", # From wikidata
"Written Vernacular Chinese": "zh", # From wikidata
"Yucatec Maya": "myn" # From wikidata. No similar language in ISO 639-1. "myn" is from ISO 639-2
}
iso639IdToName = None # Constructed on the first call to isoIdToName
def nameToIsoId(name):
words = name.split(" ")
for i in reversed(range(len(words))):
if words[i] == "characters" or words[i] == "language" or words[i] == "languages":
del words[i]
continue
words[i] = words[i][0].upper() + words[i][1:]
name = " ".join(words)
if name in nameToIso639Id:
return nameToIso639Id[name]
if name in aliasToIso639Id:
return aliasToIso639Id[name]
raise KeyError(name)
def isoIdToName(isoId):
global iso639IdToName
if not iso639IdToName:
iso639IdToName = {}
for name in nameToIso639Id:
iso639IdToName[nameToIso639Id[name]] = name
if isoId in iso639IdToName:
return iso639IdToName[isoId]
raise KeyError(isoId)
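# Minimal lookup sketch (names chosen from the tables above; illustrative only).
if __name__ == "__main__":
    assert nameToIsoId("French language") == "fr"
    assert nameToIsoId("Brazilian Portuguese") == "pt"  # resolved through the alias table
    assert isoIdToName("fr") == "French"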
| gpl-3.0 | -964,277,398,414,625,000 | 49.251309 | 131 | 0.414618 | false | 3.316517 | false | false | false |
chizarlicious/chapel | util/chplenv/chpl_comm.py | 3 | 1340 | #!/usr/bin/env python
import sys, os
import chpl_compiler
import chpl_platform
from utils import memoize
import utils
@memoize
def get():
comm_val = os.environ.get('CHPL_COMM')
if not comm_val:
platform_val = chpl_platform.get('target')
compiler_val = chpl_compiler.get('target')
# use ugni on cray-x* machines using the module and supported compiler
#
# Check that target arch is not knc. Don't use chpl_arch.get(), though,
# since it already calls into this get() function. This check only
# happens for X* systems using the Cray programming environment, so it
# is safe to assume the relevant craype module will be used that sets
# CRAY_CPU_TARGET.
if (platform_val.startswith('cray-x') and
utils.using_chapel_module() and
compiler_val in ('cray-prgenv-gnu', 'cray-prgenv-intel') and
os.getenv('CRAY_CPU_TARGET', '') != 'knc'):
comm_val = 'ugni'
# automatically uses gasnet when on a cray-x* or cray-cs machine
elif platform_val.startswith('cray-'):
comm_val = 'gasnet'
else:
comm_val = 'none'
return comm_val
def _main():
comm_val = get()
sys.stdout.write("{0}\n".format(comm_val))
if __name__ == '__main__':
_main()
| apache-2.0 | 1,757,365,281,772,737,800 | 30.904762 | 79 | 0.602985 | false | 3.53562 | false | false | false |
zenweasel/cashflow2 | cashflow_project/settings/local-dist.py | 1 | 2546 | """
This is an example settings/local.py file.
These settings overrides what's in settings/base.py
"""
from . import base
# To extend any settings from settings/base.py here's an example:
INSTALLED_APPS = base.INSTALLED_APPS + ('django_nose',)
# Note: list concatenation is used here because list.append() returns None.
MIDDLEWARE_CLASSES = base.MIDDLEWARE_CLASSES + ['debug_toolbar.middleware.DebugToolbarMiddleware']
# Define your database connections
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.postgresql_psycopg2',
'NAME': 'cashflow',
'USER': 'brent',
'PASSWORD': 'weasel',
'HOST': 'localhost',
'PORT': '',
#'OPTIONS': {
# 'init_command': 'SET storage_engine=InnoDB',
# 'charset' : 'utf8',
# 'use_unicode' : True,
#},
#'TEST_CHARSET': 'utf8',
#'TEST_COLLATION': 'utf8_general_ci',
},
# 'slave': {
# ...
# },
}
# Recipients of traceback emails and other notifications.
ADMINS = (
# ('Your Name', 'your_email@domain.com'),
)
MANAGERS = ADMINS
EMAIL_BACKEND = 'django.core.mail.backends.console.EmailBackend'
CACHES = {
'default': {
'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',
}
}
# SECURITY WARNING: don't run with debug turned on in production!
# Debugging displays nice error messages, but leaks memory. Set this to False
# on all server instances and True only for development.
DEBUG = TEMPLATE_DEBUG = True
# Is this a development instance? Set this to True on development/master
# instances and False on stage/prod.
DEV = True
# Hosts/domain names that are valid for this site; required if DEBUG is False
# See https://docs.djangoproject.com/en/1.5/ref/settings/#allowed-hosts
ALLOWED_HOSTS = []
# SECURITY WARNING: keep the secret key used in production secret!
# Hardcoded values can leak through source control. Consider loading
# the secret key from an environment variable or a file instead.
SECRET_KEY = '+hr2(b6t#wa(x2pc_94pudje_%n^#88_kt49xz5q2vkubd=w%('
# Uncomment these to activate and customize Celery:
# CELERY_ALWAYS_EAGER = False # required to activate celeryd
# BROKER_HOST = 'localhost'
# BROKER_PORT = 5672
# BROKER_USER = 'django'
# BROKER_PASSWORD = 'django'
# BROKER_VHOST = 'django'
# CELERY_RESULT_BACKEND = 'amqp'
## Log settings
# Remove this configuration variable to use your custom logging configuration
LOGGING_CONFIG = None
LOGGING = {
'version': 1,
'loggers': {
'cashflow_project': {
'level': "DEBUG"
}
}
}
INTERNAL_IPS = ('127.0.0.1',)  # trailing comma makes this a tuple rather than a string
| bsd-3-clause | 4,926,005,510,419,285,000 | 27.288889 | 102 | 0.663394 | false | 3.412869 | false | false | false |
agmscode/agms_python | agms/request/hpp_request.py | 1 | 7917 | from __future__ import absolute_import
import re
from agms.request.request import Request
from agms.exception.request_validation_exception import RequestValidationException
class HPPRequest(Request):
"""
A class representing AGMS HPP Request objects.
"""
def __init__(self, op):
Request.__init__(self,op)
self._fields = {
'TransactionType': {'setting': '', 'value': ''},
'Amount': {'setting': '', 'value': ''},
'Tax': {'setting': '', 'value': ''},
'Shipping': {'setting': '', 'value': ''},
'OrderDescription': {'setting': '', 'value': ''},
'OrderID': {'setting': '', 'value': ''},
'PONumber': {'setting': '', 'value': ''},
'RetURL': {'setting': '', 'value': ''},
'ACHEnabled': {'setting': '', 'value': ''},
'SAFE_ID': {'setting': '', 'value': ''},
'Donation': {'setting': '', 'value': ''},
'UsageCount': {'setting': '', 'value': '9999999'},
'Internal': {'setting': '', 'value': ''},
'FirstName': {'setting': '', 'value': ''},
'LastName': {'setting': '', 'value': ''},
'Company': {'setting': '', 'value': ''},
'Address1': {'setting': '', 'value': ''},
'Address2': {'setting': '', 'value': ''},
'City': {'setting': '', 'value': ''},
'State': {'setting': '', 'value': ''},
'Zip': {'setting': '', 'value': ''},
'Country': {'setting': '', 'value': ''},
'Phone': {'setting': '', 'value': ''},
'Fax': {'setting': '', 'value': ''},
'EMail': {'setting': '', 'value': ''},
'Website': {'setting': '', 'value': ''},
'ShippingFirstName': {'setting': '', 'value': ''},
'ShippingLastName': {'setting': '', 'value': ''},
'ShippingCompany': {'setting': '', 'value': ''},
'ShippingAddress1': {'setting': '', 'value': ''},
'ShippingAddress2': {'setting': '', 'value': ''},
'ShippingCity': {'setting': '', 'value': ''},
'ShippingState': {'setting': '', 'value': ''},
'ShippingZip': {'setting': '', 'value': ''},
'ShippingCountry': {'setting': '', 'value': ''},
'ShippingEmail': {'setting': '', 'value': ''},
'ShippingPhone': {'setting': '', 'value': ''},
'ShippingFax': {'setting': '', 'value': ''},
'ProcessorID': {'setting': '', 'value': ''},
'TransactionID': {'setting': '', 'value': ''},
'Tracking_Number': {'setting': '', 'value': ''},
'Shipping_Carrier': {'setting': '', 'value': ''},
'IPAddress': {'setting': '', 'value': ''},
'Custom_Field_1': {'setting': '', 'value': ''},
'Custom_Field_2': {'setting': '', 'value': ''},
'Custom_Field_3': {'setting': '', 'value': ''},
'Custom_Field_4': {'setting': '', 'value': ''},
'Custom_Field_5': {'setting': '', 'value': ''},
'Custom_Field_6': {'setting': '', 'value': ''},
'Custom_Field_7': {'setting': '', 'value': ''},
'Custom_Field_8': {'setting': '', 'value': ''},
'Custom_Field_9': {'setting': '', 'value': ''},
'Custom_Field_10': {'setting': '', 'value': ''},
'HPPFormat': {'setting': '', 'value': ''},
'StartDate': {'setting': '', 'value': ''},
'EndDate': {'setting': '', 'value': ''},
'StartTime': {'setting': '', 'value': ''},
'EndTime': {'setting': '', 'value': ''},
'SuppressAutoSAFE': {'setting': '', 'value': ''},
}
self._optionable = [
'FirstName', 'LastName', 'Company', 'Address1', 'Address2',
'City', 'State', 'Zip', 'Country', 'Phone', 'Fax',
'EMail', 'Website', 'Tax', 'Shipping', 'OrderID',
'PONumber', 'ShippingFirstName', 'ShippingLastName', 'ShippingCompany', 'ShippingAddress1',
'ShippingAddress2', 'ShippingCity', 'ShippingState', 'ShippingZip', 'ShippingCountry',
'ShippingEmail', 'ShippingPhone', 'ShippingFax', 'ShippingTrackingNumber', 'ShippingCarrier',
'Custom_Field_1', 'Custom_Field_2', 'Custom_Field_3', 'Custom_Field_4', 'Custom_Field_5',
'Custom_Field_6', 'Custom_Field_7', 'Custom_Field_8', 'Custom_Field_9', 'Custom_Field_10'
]
self._numeric = [
'Amount',
'Tax',
'Shipping',
'ProcessorID',
'TransactionID',
'CheckABA',
'CheckAccount',
'CCNumber',
'CCExpDate'
]
self._enums = {
'TransactionType': ['sale', 'auth', 'safe only', 'capture', 'void', 'refund', 'update', 'adjustment'],
'Shipping_Carrier': ['ups', 'fedex', 'dhl', 'usps', 'UPS', 'Fedex', 'DHL', 'USPS'],
'HPPFormat': ['1', '2']
}
        self._boolean = ['Donation', 'AutoSAFE', 'SuppressAutoSAFE']
self._date = ['StartDate', 'EndDate']
self._digit_2 = ['State', 'ShippingState']
self._amount = ['Amount', 'TipAmount', 'Tax', 'Shipping']
self._required = ['TransactionType']
# Override mapping with api-specific field maps
self._mapping['shipping_tracking_number'] = 'Tracking_Number'
self._mapping['shipping_carrier'] = 'Shipping_Carrier'
def validate(self):
# All sales and auth require an amount unless donation
if ((not self._fields['Donation']['value'] or
self._fields['Donation']['value'] is not False) and
(self._fields['TransactionType']['value'] == 'sale' or
self._fields['TransactionType']['value'] == 'auth')):
self._required.append('Amount')
error_array = self._auto_validate()
errors = error_array['errors']
messages = error_array['messages']
# ExpDate MMYY
if ('CCExpDate' in self._fields.keys() and
self._fields['CCExpDate']['value'] and
(len(self._fields['CCExpDate']['value']) != 4 or
not re.match("^(0[1-9]|1[0-2])([0-9][0-9])$", self._fields['CCExpDate']['value']))):
errors += 1
messages.append('CCExpDate (credit card expiration date) must be MMYY.')
# CCNumber length
if ('CCNumber' in self._fields.keys() and
self._fields['CCNumber']['value'] and
len(self._fields['CCNumber']['value']) != 16 and
len(self._fields['CCNumber']['value']) != 15):
errors += 1
messages.append('CCNumber (credit card number) must be 15-16 digits long.')
# ABA length
if ('CheckABA' in self._fields.keys() and
self._fields['CheckABA']['value'] and
len(self._fields['CheckABA']['value']) != 9):
errors += 1
messages.append('CheckABA (routing number) must be 9 digits long.')
self.validate_errors = errors
self.validate_messages = messages
if errors == 0:
return {'errors': errors, 'messages': messages}
else:
raise RequestValidationException('Request validation failed with ' + ' '.join(messages))
def get_fields(self):
fields = self._get_field_array()
if 'AutoSAFE' in fields.keys():
if fields['AutoSAFE'] is True:
fields['AutoSAFE'] = 1
else:
fields['AutoSAFE'] = 0
if 'SuppressAutoSAFE' in fields.keys():
if fields['SuppressAutoSAFE'] is True:
fields['SuppressAutoSAFE'] = 1
else:
fields['SuppressAutoSAFE'] = 0
return fields
def get_params(self, request):
return {'objparameters': request} | mit | 1,617,904,537,919,609,300 | 42.988889 | 114 | 0.48099 | false | 4.057919 | false | false | false |
fernandog/Medusa | medusa/session/hooks.py | 1 | 1640 | # coding=utf-8
from __future__ import unicode_literals
import logging
from medusa.logger.adapters.style import BraceAdapter
from six import text_type
log = BraceAdapter(logging.getLogger(__name__))
log.logger.addHandler(logging.NullHandler())
def log_url(response, **kwargs):
"""Response hook to log request URL."""
request = response.request
log.debug(
'{method} URL: {url} [Status: {status}]', {
'method': request.method,
'url': request.url,
'status': response.status_code,
}
)
log.debug('User-Agent: {}'.format(request.headers['User-Agent']))
if request.method.upper() == 'POST':
if request.body:
if 'multipart/form-data' not in request.headers.get('content-type', ''):
body = request.body
else:
body = request.body[1:99].replace('\n', ' ') + '...'
else:
body = ''
# try to log post data using various codecs to decode
if isinstance(body, text_type):
log.debug('With post data: {0}', body)
return
codecs = ('utf-8', 'latin1', 'cp1252')
for codec in codecs:
try:
data = body.decode(codec)
except UnicodeError as error:
log.debug('Failed to decode post data as {codec}: {msg}',
{'codec': codec, 'msg': error})
else:
log.debug('With post data: {0}', data)
break
else:
log.warning('Failed to decode post data with {codecs}',
{'codecs': codecs})
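# Example usage (illustrative sketch, not part of the original module): attach
# the hook to a requests Session so every response made through it is logged.
def _example_attach_log_hook(session):
    """Register log_url as a response hook on the given requests.Session."""
    session.hooks['response'].append(log_url)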
| gpl-3.0 | 1,730,956,940,932,370,200 | 29.943396 | 84 | 0.532927 | false | 4.162437 | false | false | false |
IntegratedAlarmSystem-Group/ias | Tools/src/main/python/IASApiDocs/DocGenerator.py | 1 | 3590 | '''
Base class for java, scala and python API docs generators
Created on Jul 7, 2017
@author: acaproni
'''
import sys
import os
import logging
class DocGenerator(object):
'''
The base class for API docs generators
'''
def __init__(self,srcFolder,dstFolder,outFile=sys.stdout):
"""
Constructor
@param srcFolder: the folder with sources to generate their documentation
@param dstFolder: destination folder for the api docs
@param outFile: the file where the output generated by calling java/scala/py-doc must be sent
"""
self.checkFolders(srcFolder,dstFolder)
self.srcFolder=srcFolder
self.dstFolder=dstFolder
self.outFile=outFile
assert self.outFile is not None
def checkFolders(self,src,dst):
"""
Check if the source and dest folders are valid and if it is not the case,
throws an exception
@param src: the folder with java sources to check
@param dst: destination folder to check
"""
# Check if src folder exists
if not os.path.exists(src):
logging.error("The source folder %s does not exist",src)
raise OSError("The source folder", src,"does not exist")
elif not os.path.isdir(src):
logging.error("The source folder %s is not a directory",src)
raise OSError("The source folder", src,"is not a directory")
# Check if the destination folder exists
if not os.path.exists(dst):
os.mkdir(dst)
if not os.path.exists(dst):
logging.error("The destination folder %s does not exist",dst)
raise OSError("The destination folder", dst,"does not exist")
elif not os.path.isdir(dst):
logging.error("The destination folder %s is not a directory",dst)
raise OSError("The destination folder", dst,"is not a directory")
def containsSources(self,folder,fileExtension):
'''
@param folder: the folder (src or test) to check if contains java sources
@param fileExtension: the extension of the files that the folder is supposed to contain
@return: True if the passed folder contains java sources
'''
for root, subdirs, files in os.walk(folder):
for file in files:
if file.endswith(fileExtension):
return True
return False
def getSrcPaths(self,sourceFolder, includeTestFolder,folderName,fileExtension):
"""
Scan the source folder and return a list of source folders
        containing java files.
Java source can be contained into src or test (the latter is used only
if the includeTestFolder parameter is True)
        The search is recursive because a folder can contain several modules
@param sourceFolder: root source folder (generally IAS, passed in the command line)
        @param includeTestFolder: True to include test folders in the scan
@param folderName: the name of the folder containing the sources like java or python
@param fileExtension: the extension of the files that the folder is supposed to contain
"""
ret = []
for root, subdirs, files in os.walk(sourceFolder):
if root.endswith(os.path.sep+"main/"+folderName) or (includeTestFolder and root.endswith(os.path.sep+"test/"+folderName)):
if self.containsSources(root,fileExtension):
ret.append(root)
return ret
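# Example (illustrative sketch, not part of the IAS sources): a minimal
# concrete generator that only reports which source folders it would document.
class ExampleDocGenerator(DocGenerator):
    def listJavaSourceFolders(self, sourceFolder, includeTestFolder=False):
        # Reuse the recursive scan to find main/java (and optionally test/java)
        # folders that actually contain .java files
        return self.getSrcPaths(sourceFolder, includeTestFolder, "java", ".java")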
| lgpl-3.0 | -3,484,627,717,770,637,300 | 40.264368 | 134 | 0.637326 | false | 4.58493 | true | false | false |
JFinis/serverscript | src/ss/jobs/sync_job.py | 1 | 1469 | '''
Created on 21.01.2017
@author: gex
'''
from ss.jobs.job import Job
from ss.rsync import RSync
class SyncJob(Job):
'''
Synchronizes the 'to' directory with the 'from' directory.
    I.e., the directories will be exactly the same after the command completes.
'''
FROM_KEY='from'
TO_KEY='to'
EXCLUDE_KEY='exclude'
def __init__(self,config,lastExecConfig,name):
super(SyncJob, self).__init__(config,lastExecConfig,name)
self._fromPath=self.getRequiredConfPath(SyncJob.FROM_KEY)
self._toPath=self.getRequiredConfPath(SyncJob.TO_KEY)
self._excludes=self.getOptionalConfStr(SyncJob.EXCLUDE_KEY).split(';')
# Only non-empty entries, strip whitespace
self._excludes=[x.strip() for x in self._excludes if len(x.strip()) > 0]
def execute(self):
rsync=RSync()
rsync.setArchive(True) # Archive mode, preserve file attributes and more
rsync.setStats(True) # Print stats
        rsync.setUpdate(True) # Update only files that are newer than the files on the receiver
rsync.setDelete(True) # Delete files on the receiver side that don't exist on the sender side
if len(self._excludes) > 0:
rsync.setDeleteExcluded(True) # Delete excluded dirs from receiver, if present
rsync.setExcludes(self._excludes)
# Execute
rsync.execute(self._fromPath,self._toPath)
return True | mit | 1,426,035,962,513,237,000 | 33.02381 | 101 | 0.633084 | false | 3.855643 | false | false | false |
GirlsCodePy/girlscode-coursebuilder | modules/dashboard/dashboard.py | 3 | 26359 | # Copyright 2014 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Classes and methods to create and manage Courses."""
__author__ = 'Pavel Simakov (psimakov@google.com)'
import collections
import datetime
import logging
import os
import urllib
import uuid
import appengine_config
from filer import AssetItemRESTHandler
from filer import FileManagerAndEditor
from filer import FilesItemRESTHandler
from filer import TextAssetRESTHandler
from label_editor import LabelManagerAndEditor, TrackManagerAndEditor
from label_editor import LabelRestHandler, TrackRestHandler
import messages
from question_editor import GeneralQuestionRESTHandler
from question_editor import GiftQuestionRESTHandler
from question_editor import McQuestionRESTHandler
from question_editor import QuestionManagerAndEditor
from question_editor import SaQuestionRESTHandler
from question_group_editor import QuestionGroupManagerAndEditor
from question_group_editor import QuestionGroupRESTHandler
from role_editor import RoleManagerAndEditor
from role_editor import RoleRESTHandler
import utils as dashboard_utils
from common import crypto
from common import jinja_utils
from common import safe_dom
from common import tags
from common import users
from common.utils import Namespace
from controllers import sites
from controllers.utils import ApplicationHandler
from controllers.utils import CourseHandler
from controllers.utils import ReflectiveRequestHandler
from models import config
from models import custom_modules
from models import roles
from models import services
from models.models import RoleDAO
from common import menus
from google.appengine.api import app_identity
custom_module = None
TEMPLATE_DIR = os.path.join(
appengine_config.BUNDLE_ROOT, 'modules', 'dashboard', 'templates')
class DashboardHandler(
CourseHandler, FileManagerAndEditor,
LabelManagerAndEditor, TrackManagerAndEditor, QuestionGroupManagerAndEditor,
QuestionManagerAndEditor, ReflectiveRequestHandler, RoleManagerAndEditor):
"""Handles all pages and actions required for managing a course."""
# This dictionary allows the dashboard module to optionally nominate a
# specific sub-tab within each major tab group as the default sub-tab to
# open when first navigating to that major tab. The default may be
# explicitly specified here so that sub-tab registrations from other
# modules do not inadvertently take over the first position due to order
# of module registration.
default_subtab_action = collections.defaultdict(lambda: None)
get_actions = [
'edit_settings', 'edit_unit_lesson',
'manage_asset', 'manage_text_asset',
'add_mc_question', 'add_sa_question',
'edit_question', 'add_question_group', 'edit_question_group',
'question_preview', 'question_group_preview',
'add_label', 'edit_label', 'add_track', 'edit_track',
'add_role', 'edit_role',
'import_gift_questions']
# Requests to these handlers automatically go through an XSRF token check
# that is implemented in ReflectiveRequestHandler.
post_actions = [
'create_or_edit_settings',
'add_to_question_group',
'clone_question']
child_routes = [
(AssetItemRESTHandler.URI, AssetItemRESTHandler),
(FilesItemRESTHandler.URI, FilesItemRESTHandler),
(LabelRestHandler.URI, LabelRestHandler),
(TrackRestHandler.URI, TrackRestHandler),
(McQuestionRESTHandler.URI, McQuestionRESTHandler),
(GiftQuestionRESTHandler.URI, GiftQuestionRESTHandler),
(SaQuestionRESTHandler.URI, SaQuestionRESTHandler),
(GeneralQuestionRESTHandler.URI, GeneralQuestionRESTHandler),
(TextAssetRESTHandler.URI, TextAssetRESTHandler),
(QuestionGroupRESTHandler.URI, QuestionGroupRESTHandler),
(RoleRESTHandler.URI, RoleRESTHandler)]
# List of functions which are used to generate content displayed at the top
# of every dashboard page. Use this with caution, as it is extremely
# invasive of the UX. Each function receives the handler as arg and returns
# an object to be inserted into a Jinja template (e.g. a string, a safe_dom
# Node or NodeList, or a jinja2.Markup).
PAGE_HEADER_HOOKS = []
# A list of hrefs for extra CSS files to be included in dashboard pages.
# Files listed here by URL will be available on every Dashboard page.
EXTRA_CSS_HREF_LIST = []
# A list of hrefs for extra JS files to be included in dashboard pages.
# Files listed here by URL will be available on every Dashboard page.
EXTRA_JS_HREF_LIST = []
# A list of template locations to be included in dashboard pages
ADDITIONAL_DIRS = []
# Dictionary that maps external permissions to their descriptions
_external_permissions = {}
# Dictionary that maps actions to permissions
_get_action_to_permission = {}
_post_action_to_permission = {}
default_action = None
GetAction = collections.namedtuple('GetAction', ['handler', 'in_action'])
_custom_get_actions = {} # Map of name to GetAction
_custom_post_actions = {} # Map of name to handler callback.
# Create top level menu groups which other modules can register against.
# I would do this in "register", but other modules register first.
actions_to_menu_items = {}
root_menu_group = menus.MenuGroup('dashboard', 'Dashboard')
@classmethod
def add_nav_mapping(cls, name, title, **kwargs):
"""Create a top level nav item."""
menu_item = cls.root_menu_group.get_child(name)
if menu_item is None:
is_link = kwargs.get('href')
menu_cls = menus.MenuItem if is_link else menus.MenuGroup
menu_item = menu_cls(
name, title, group=cls.root_menu_group, **kwargs)
if not is_link:
# create the basic buckets
pinned = menus.MenuGroup(
'pinned', None, placement=1000, group=menu_item)
default = menus.MenuGroup(
'default', None, placement=2000, group=menu_item)
advanced = menus.MenuGroup(
'advanced', None,
placement=menus.MenuGroup.DEFAULT_PLACEMENT * 2,
group=menu_item)
return menu_item
@classmethod
def get_nav_title(cls, action):
item = cls.actions_to_menu_items.get(action)
if item:
return item.group.group.title + " > " + item.title
else:
return None
@classmethod
def add_sub_nav_mapping(
cls, group_name, item_name, title, action=None, contents=None,
can_view=None, href=None, no_app_context=False,
sub_group_name=None, **kwargs):
"""Create a second level nav item.
Args:
group_name: Name of an existing top level nav item to use as the
parent
item_name: A unique key for this item
title: Human-readable label
            action: A unique operation ID for this item; it is used as the
                dashboard GET action name and for permission checks. If
                omitted, one is derived from group_name and item_name.
contents: A handler which will be added as a custom get-action on
DashboardHandler
can_view: Pass a boolean function here if your handler has
additional permissions logic in it that the dashboard does not
check for you. You must additionally check it in your handler.
sub_group_name: The sub groups 'pinned', 'default', and 'advanced'
exist in that order and 'default' is used by default. You can
pass some other string to create a new group at the end.
other arguments: see common/menus.py
"""
group = cls.root_menu_group.get_child(group_name)
if group is None:
logging.critical('The group %s does not exist', group_name)
return
if sub_group_name is None:
sub_group_name = 'default'
sub_group = group.get_child(sub_group_name)
if not sub_group:
sub_group = menus.MenuGroup(
sub_group_name, None, group=group)
item = sub_group.get_child(item_name)
if item:
logging.critical(
'There is already a sub-menu item named "%s" registered in '
'group %s subgroup %s.', item_name, group_name, sub_group_name)
return
if contents:
action = action or group_name + '_' + item_name
if action and not href:
href = "dashboard?action={}".format(action)
def combined_can_view(app_context):
if action:
# Current design disallows actions at the global level.
# This might change in the future.
if not app_context and not no_app_context:
return False
# Check permissions in the dashboard
if not cls.can_view(action):
return False
# Additional custom visibility check
if can_view and not can_view(app_context):
return False
return True
item = menus.MenuItem(
item_name, title, action=action, group=sub_group,
can_view=combined_can_view, href=href, **kwargs)
cls.actions_to_menu_items[action] = item
if contents:
cls.add_custom_get_action(action, handler=contents)
@classmethod
def add_custom_get_action(cls, action, handler=None, in_action=None,
overwrite=False):
if not action:
logging.critical('Action not specified. Ignoring.')
return False
if not handler:
logging.critical(
'For action : %s handler can not be null.', action)
return False
if ((action in cls._custom_get_actions or action in cls.get_actions)
and not overwrite):
logging.critical(
'action : %s already exists. Ignoring the custom get action.',
action)
return False
cls._custom_get_actions[action] = cls.GetAction(handler, in_action)
return True
@classmethod
def remove_custom_get_action(cls, action):
if action in cls._custom_get_actions:
cls._custom_get_actions.pop(action)
@classmethod
def add_custom_post_action(cls, action, handler, overwrite=False):
if not handler or not action:
logging.critical('Action or handler can not be null.')
return False
if ((action in cls._custom_post_actions or action in cls.post_actions)
and not overwrite):
logging.critical(
'action : %s already exists. Ignoring the custom post action.',
action)
return False
cls._custom_post_actions[action] = handler
return True
@classmethod
def remove_custom_post_action(cls, action):
if action in cls._custom_post_actions:
cls._custom_post_actions.pop(action)
@classmethod
def get_child_routes(cls):
"""Add child handlers for REST."""
return cls.child_routes
@classmethod
def can_view(cls, action):
"""Checks if current user has viewing rights."""
app_context = sites.get_app_context_for_current_request()
if action in cls._get_action_to_permission:
return cls._get_action_to_permission[action](app_context)
return roles.Roles.is_course_admin(app_context)
@classmethod
def can_edit(cls, action):
"""Checks if current user has editing rights."""
app_context = sites.get_app_context_for_current_request()
if action in cls._post_action_to_permission:
return cls._post_action_to_permission[action](app_context)
return roles.Roles.is_course_admin(app_context)
def default_action_for_current_permissions(self):
"""Set the default or first active navigation tab as default action."""
item = self.root_menu_group.first_visible_item(self.app_context)
if item:
return item.action
def get(self):
"""Enforces rights to all GET operations."""
action = self.request.get('action')
if not action:
self.default_action = self.default_action_for_current_permissions()
action = self.default_action
self.action = action
if not self.can_view(action):
self.redirect(self.app_context.get_slug())
return
if action in self._custom_get_actions:
result = self._custom_get_actions[action].handler(self)
if result is None:
return
# The following code handles pages for actions that do not write out
# their responses.
template_values = {
'page_title': self.format_title(self.get_nav_title(action)),
}
if isinstance(result, dict):
template_values.update(result)
else:
template_values['main_content'] = result
self.render_page(template_values)
return
# Force reload of properties. It is expensive, but admin deserves it!
config.Registry.get_overrides(force_update=True)
return super(DashboardHandler, self).get()
def post(self):
"""Enforces rights to all POST operations."""
action = self.request.get('action')
self.action = action
if not self.can_edit(action):
self.redirect(self.app_context.get_slug())
return
if action in self._custom_post_actions:
# Each POST request must have valid XSRF token.
xsrf_token = self.request.get('xsrf_token')
if not crypto.XsrfTokenManager.is_xsrf_token_valid(
xsrf_token, action):
self.error(403)
return
self._custom_post_actions[action](self)
return
return super(DashboardHandler, self).post()
def get_template(self, template_name, dirs=None):
"""Sets up an environment and Gets jinja template."""
return jinja_utils.get_template(
template_name, (dirs or []) + [TEMPLATE_DIR], handler=self)
def get_alerts(self):
alerts = []
if not self.app_context.is_editable_fs():
alerts.append('Read-only course.')
if not self.app_context.now_available:
alerts.append('The course is not publicly available.')
return '\n'.join(alerts)
def _get_current_menu_action(self):
registered_action = self._custom_get_actions.get(self.action)
if registered_action:
registered_in_action = registered_action.in_action
if registered_in_action:
return registered_in_action
return self.action
def render_page(self, template_values, in_action=None):
"""Renders a page using provided template values."""
template_values['header_title'] = template_values['page_title']
template_values['page_headers'] = [
hook(self) for hook in self.PAGE_HEADER_HOOKS]
template_values['course_title'] = self.app_context.get_title()
current_action = in_action or self._get_current_menu_action()
template_values['current_menu_item'] = self.actions_to_menu_items.get(
current_action)
template_values['courses_menu_item'] = self.actions_to_menu_items.get(
'courses')
template_values['root_menu_group'] = self.root_menu_group
template_values['course_app_contexts'] = get_visible_courses()
template_values['app_context'] = self.app_context
template_values['current_course'] = self.get_course()
template_values['gcb_course_base'] = self.get_base_href(self)
template_values['user_nav'] = safe_dom.NodeList().append(
safe_dom.Text('%s | ' % users.get_current_user().email())
).append(
safe_dom.Element(
'a', href=users.create_logout_url(self.request.uri)
).add_text('Logout'))
template_values[
'page_footer'] = 'Page created on: %s' % datetime.datetime.now()
template_values['coursebuilder_version'] = (
os.environ['GCB_PRODUCT_VERSION'])
template_values['application_id'] = app_identity.get_application_id()
version = os.environ['CURRENT_VERSION_ID']
if '.' not in version or not appengine_config.PRODUCTION_MODE:
template_values['application_version'] = version
else:
version, deployed_at = version.split('.', 1)
template_values['application_version'] = version
template_values['deployed_at'] = datetime.datetime.utcfromtimestamp(
int(deployed_at) >> 28) # Yes, really.
template_values['extra_css_href_list'] = self.EXTRA_CSS_HREF_LIST
template_values['extra_js_href_list'] = self.EXTRA_JS_HREF_LIST
template_values['powered_by_url'] = services.help_urls.get(
'dashboard:powered_by')
if not template_values.get('sections'):
template_values['sections'] = []
if not appengine_config.PRODUCTION_MODE:
template_values['page_uuid'] = str(uuid.uuid1())
self.response.write(
self.get_template('view.html').render(template_values))
@classmethod
def register_courses_menu_item(cls, menu_item):
cls.actions_to_menu_items['courses'] = menu_item
def format_title(self, text):
"""Formats standard title with or without course picker."""
ret = safe_dom.NodeList()
cb_text = 'Course Builder '
ret.append(safe_dom.Text(cb_text))
ret.append(safe_dom.Entity('>'))
ret.append(safe_dom.Text(' %s ' % self.app_context.get_title()))
ret.append(safe_dom.Entity('>'))
dashboard_text = ' Dashboard '
ret.append(safe_dom.Text(dashboard_text))
ret.append(safe_dom.Entity('>'))
ret.append(safe_dom.Text(' %s' % text))
return ret
def get_action_url(self, action, key=None, extra_args=None, fragment=None):
args = {'action': action}
if key:
args['key'] = key
if extra_args:
args.update(extra_args)
url = '/dashboard?%s' % urllib.urlencode(args)
if fragment:
url += '#' + fragment
return self.canonicalize_url(url)
def _render_roles_list(self):
"""Render roles list to HTML."""
all_roles = sorted(RoleDAO.get_all(), key=lambda role: role.name)
return safe_dom.Template(
self.get_template('role_list.html'), roles=all_roles)
def _render_roles_view(self):
"""Renders course roles view."""
actions = [{
'id': 'add_role',
'caption': 'Add Role',
'href': self.get_action_url('add_role')}]
sections = [{
'description': messages.ROLES_DESCRIPTION,
'actions': actions,
'pre': self._render_roles_list()
}]
template_values = {
'page_title': self.format_title('Roles'),
'sections': sections,
}
return template_values
@classmethod
def map_get_action_to_permission(cls, action, module, perm):
"""Maps a view/get action to a permission.
Map a GET action that goes through the dashboard to a
permission to control which users have access.
Example:
The i18n module maps multiple actions to the permission
'access_i18n_dashboard'. Users who have a role assigned with this
permission are then allowed to perform these actions and thus
access the translation tools.
Args:
action: a string specifying the action to map.
module: The module with which the permission was registered via
a call to models.roles.Roles.register_permission()
permission: a string specifying the permission to which the action
should be mapped.
"""
checker = lambda ctx: roles.Roles.is_user_allowed(ctx, module, perm)
cls.map_get_action_to_permission_checker(action, checker)
@classmethod
def map_get_action_to_permission_checker(cls, action, checker):
"""Map an action to a function to check permissions.
Some actions (notably settings and the course overview) produce pages
that have items that may be controlled by multiple permissions or
more complex verification than a single permission allows. This
function allows modules to specify check functions.
Args:
action: A string specifying the name of the action being checked.
This should have been registered via add_custom_get_action(),
or present in the 'get_actions' list above in this file.
checker: A function which is run when the named action is accessed.
Registered functions should expect one parameter: the application
context object, and return a Boolean value.
"""
cls._get_action_to_permission[action] = checker
@classmethod
def unmap_get_action_to_permission(cls, action):
del cls._get_action_to_permission[action]
@classmethod
def map_post_action_to_permission(cls, action, module, perm):
"""Maps an edit action to a permission. (See 'get' version, above.)"""
checker = lambda ctx: roles.Roles.is_user_allowed(ctx, module, perm)
cls.map_post_action_to_permission_checker(action, checker)
@classmethod
def map_post_action_to_permission_checker(cls, action, checker):
"""Map an edit action to check function. (See 'get' version, above)."""
cls._post_action_to_permission[action] = checker
@classmethod
def unmap_post_action_to_permission(cls, action):
"""Remove mapping to edit action. (See 'get' version, above)."""
del cls._post_action_to_permission[action]
@classmethod
def deprecated_add_external_permission(cls, permission_name,
permission_description):
"""Adds extra permissions that will be registered by the Dashboard.
Normally, permissions should be registered in their own modules.
Due to historical accident, the I18N module registers permissions
with the dashboard. For backward compatibility with existing roles,
this API is preserved, but not suggested for use by future modules.
"""
cls._external_permissions[permission_name] = permission_description
@classmethod
def remove_external_permission(cls, permission_name):
del cls._external_permissions[permission_name]
@classmethod
def permissions_callback(cls, unused_app_context):
return cls._external_permissions.iteritems()
@classmethod
def current_user_has_access(cls, app_context):
return cls.root_menu_group.can_view(app_context, exclude_links=True)
@classmethod
def generate_dashboard_link(cls, app_context):
if cls.current_user_has_access(app_context):
return [('dashboard', 'Dashboard')]
return []
def make_help_menu():
DashboardHandler.add_nav_mapping('help', 'Help', placement=6000)
DashboardHandler.add_sub_nav_mapping(
'help', 'documentation', 'Documentation',
href=services.help_urls.get('help:documentation'), target='_blank')
DashboardHandler.add_sub_nav_mapping(
'help', 'forum', 'Support', href=services.help_urls.get('help:forum'),
target='_blank')
DashboardHandler.add_sub_nav_mapping(
'help', 'videos', 'Videos', href=services.help_urls.get('help:videos'),
target='_blank')
def get_visible_courses():
result = []
for app_context in sorted(sites.get_all_courses(),
key=lambda course: course.get_title().lower()):
with Namespace(app_context.namespace):
if DashboardHandler.current_user_has_access(app_context):
result.append(app_context)
return result
def register_module():
"""Registers this module in the registry."""
DashboardHandler.add_nav_mapping('edit', 'Create', placement=1000)
DashboardHandler.add_nav_mapping('style', 'Style', placement=2000)
DashboardHandler.add_nav_mapping('publish', 'Publish', placement=3000)
DashboardHandler.add_nav_mapping('analytics', 'Manage', placement=4000)
DashboardHandler.add_nav_mapping('settings', 'Settings', placement=5000)
make_help_menu()
# pylint: disable=protected-access
DashboardHandler.add_sub_nav_mapping(
'settings', 'roles', 'Roles', action='edit_roles',
contents=DashboardHandler._render_roles_view)
# pylint: enable=protected-access
def on_module_enabled():
roles.Roles.register_permissions(
custom_module, DashboardHandler.permissions_callback)
ApplicationHandler.AUTH_LINKS.append(
DashboardHandler.generate_dashboard_link)
global_routes = [
(dashboard_utils.RESOURCES_PATH +'/js/.*', tags.JQueryHandler),
(dashboard_utils.RESOURCES_PATH + '/.*',
tags.DeprecatedResourcesHandler)]
dashboard_handlers = [
('/dashboard', DashboardHandler),
]
global custom_module # pylint: disable=global-statement
custom_module = custom_modules.Module(
'Course Dashboard',
'A set of pages for managing Course Builder course.',
global_routes, dashboard_handlers,
notify_module_enabled=on_module_enabled)
return custom_module
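# Example (illustrative sketch, not part of Course Builder): how another module
# could register its own dashboard page and gate it behind a custom permission.
# The names my_module, 'my_page' and 'view_my_page' are placeholders.
def _example_register_custom_page(my_module):
    def render_my_page(handler):
        return safe_dom.Text('Hello from a custom dashboard page')
    DashboardHandler.add_sub_nav_mapping(
        'analytics', 'my_page', 'My Page', action='analytics_my_page',
        contents=render_my_page)
    DashboardHandler.map_get_action_to_permission(
        'analytics_my_page', my_module, 'view_my_page')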
| gpl-3.0 | -358,744,994,067,295,900 | 38.998483 | 80 | 0.638833 | false | 4.213395 | false | false | false |
stormi/tsunami | src/primaires/communication/contextes/immersion.py | 1 | 14322 | # -*-coding:Utf-8 -*
# Copyright (c) 2010 LE GOFF Vincent
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# * Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
# OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
"""File containing the 'communication:immersion' context."""
from primaires.format.constantes import ponctuations_finales
from primaires.interpreteur.contexte import Contexte
from primaires.communication.contextes.invitation import Invitation
class Immersion(Contexte):
    """Immersion context inside a communication channel.
    """
    def __init__(self, pere):
        """Context constructor."""
Contexte.__init__(self, pere)
self.opts.prompt_prf = ""
self.opts.prompt_clr = ""
self.canal = None
self.options = {
            # User options
"q" : self.opt_quit,
"w" : self.opt_who,
"h" : self.opt_help,
"i" : self.opt_invite,
"me" : self.opt_emote,
            # Moderator options
"e" : self.opt_eject,
"b" : self.opt_ban,
"a" : self.opt_announce,
            # Admin options
"p" : self.opt_promote,
"ed" : self.opt_edit,
"d" : self.opt_dissolve,
}
    def __getstate__(self):
        """Clean up the options for pickling (store method names, not bound methods)"""
dico_attr = Contexte.__getstate__(self)
dico_attr["options"] = dico_attr["options"].copy()
for rac, fonction in dico_attr["options"].items():
dico_attr["options"][rac] = fonction.__name__
return dico_attr
    def __setstate__(self, dico_attr):
        """Restore the context (rebind option names to bound methods)"""
Contexte.__setstate__(self, dico_attr)
for rac, nom in self.options.items():
fonction = getattr(self, nom)
self.options[rac] = fonction
@property
def u_nom(self):
return "immersion:" + self.canal.nom
    def accueil(self):
        """Welcome message of the context"""
canal = self.canal
res = canal.clr + ">|ff| Immersion dans le canal " + canal.nom
res += "\n Entrez |ent|/h|ff| pour afficher l'aide."
return res
    def opt_quit(self, arguments):
        """Quit option: /q"""
canal = self.canal
personnage = self.pere.joueur
canal.immerger_ou_sortir(personnage)
personnage << canal.clr + ">|ff| Retour au jeu."
    def opt_who(self, arguments):
        """Who option: /w"""
personnage = self.pere.joueur
res = self.canal.clr + ">|ff| Joueurs connectés :"
for connecte in self.canal.connectes:
if connecte in type(self).importeur.connex.joueurs_connectes:
if connecte is self.canal.auteur:
statut = "|rgc|@"
elif connecte in self.canal.moderateurs:
statut = "|jn|*"
else:
statut = "|bc|"
res += "\n " + statut + connecte.nom + "|ff|"
if connecte in self.canal.immerges:
res += " (immergé)"
personnage << res
    def opt_help(self, arguments):
        """Help display option: /h"""
personnage = self.pere.joueur
canal = self.canal
res = canal.clr + ">|ff| Aide du canal |ent|{}|ff| ({}) :\n".format(
canal.nom, canal.resume)
res += str(canal.description)
res += "\n Administrateur : |rgc|"
res += (canal.auteur and canal.auteur.nom or "aucun") + "|ff|"
modos = ""
if len(canal.moderateurs) == 1:
modos = "\n Modérateur : |jn|" + canal.moderateurs[0].nom + "|ff|"
elif len(canal.moderateurs) > 1:
modos = "\n Modérateurs : |jn|" + "|ff|, |jn|".join(
sorted([modo.nom for modo in canal.moderateurs])) + "|ff|"
res += modos
res += "\n Commandes disponibles :"
res += "\n - |cmd|/h|ff| : affiche ce message d'aide"
res += "\n - |cmd|/w|ff| : liste les joueurs connectés au canal"
res += "\n - |cmd|/i <joueur>|ff| : invite un joueur à rejoindre "
res += "le canal"
res += "\n - |cmd|/me <message>|ff| : joue une emote dans le canal"
res += "\n - |cmd|/q|ff| : permet de sortir du mode immersif"
if personnage in canal.moderateurs or personnage is canal.auteur \
or personnage.est_immortel():
res += "\n Commandes de modération :"
res += "\n - |cmd|/e <joueur>|ff| : éjecte un joueur"
res += "\n - |cmd|/b <joueur>|ff| : bannit ou rappelle un joueur"
res += "\n - |cmd|/a <message>|ff| : permet d'envoyer une "
res += "annonce impersonnelle"
if personnage is canal.auteur or personnage.est_immortel():
res += "\n Commandes d'administration :"
res += "\n - |cmd|/p <joueur>|ff| : promeut ou déchoit un joueur "
res += "modérateur"
res += "\n - |cmd|/ed|ff| : ouvre l'éditeur du canal"
res += "\n - |cmd|/d|ff| : dissout le canal"
personnage << res
    def opt_invite(self, arguments):
        """Option to invite a player to join the channel: /i <player>"""
canal = self.canal
if not arguments or arguments.isspace():
self.pere.joueur << "|err|Vous devez spécifier un joueur.|ff|"
return
nom_joueur = arguments.split(" ")[0]
joueur = None
for t_joueur in type(self).importeur.connex.joueurs_connectes:
if nom_joueur == t_joueur.nom.lower():
joueur = t_joueur
break
if joueur is None:
self.pere.joueur << "|err|Le joueur passé en paramètre n'a pu " \
"être trouvé.|ff|"
return
if joueur in canal.connectes:
self.pere.joueur << "|err|Ce joueur est déjà connecté au canal.|ff|"
return
contexte = Invitation(joueur.instance_connexion)
contexte.emetteur = self.pere.joueur
contexte.canal = canal
contexte.actualiser()
self.pere.joueur << "|att|Vous venez d'inviter {} à rejoindre le " \
"canal {}.|ff|".format(joueur.nom, canal.nom)
    def opt_emote(self, arguments):
        """Emote option in the immersive context"""
canal = self.canal
joueur = self.pere.joueur
if not arguments or arguments.isspace():
joueur << "|err|Vous devez préciser une action.|ff|"
return
message = arguments.rstrip(" \n")
if not message[-1] in ponctuations_finales:
message += "."
im = canal.clr + "<" + joueur.nom + " " + message + ">|ff|"
ex = canal.clr + "[" + canal.nom + "] " + joueur.nom + " "
ex += message + "|ff|"
for connecte in canal.connectes:
if connecte in type(self).importeur.connex.joueurs_connectes:
if connecte in canal.immerges:
connecte << im
else:
connecte << ex
    def opt_eject(self, arguments):
        """Option to eject a connected player: /e <player>"""
canal = self.canal
if not self.pere.joueur in canal.moderateurs and \
self.pere.joueur is not canal.auteur and not \
self.pere.joueur.est_immortel():
self.pere.joueur << "|err|Vous n'avez pas accès à cette option.|ff|"
return
if not arguments or arguments.isspace():
self.pere.joueur << "|err|Vous devez spécifier un joueur.|ff|"
return
nom_joueur = arguments.split(" ")[0]
joueur = None
for connecte in canal.connectes:
if nom_joueur == connecte.nom.lower():
joueur = connecte
break
if joueur is None:
self.pere.joueur << "|err|Ce joueur n'est pas connecté au " \
"canal.|ff|"
return
if joueur is self.pere.joueur:
self.pere.joueur << "|err|Vous ne pouvez vous éjecter " \
"vous-même.|ff|"
return
if joueur in canal.moderateurs or joueur is canal.auteur:
self.pere.joueur << "|err|Vous ne pouvez éjecter ce joueur.|ff|"
return
canal.ejecter(joueur)
    def opt_ban(self, arguments):
        """Option to ban a connected player: /b <player>"""
canal = self.canal
if not self.pere.joueur in canal.moderateurs and \
self.pere.joueur is not canal.auteur and not \
self.pere.joueur.est_immortel():
self.pere.joueur << "|err|Vous n'avez pas accès à cette option.|ff|"
return
nom_joueur = arguments.split(" ")[0]
joueur = None
for t_joueur in type(self).importeur.connex.joueurs:
if nom_joueur == t_joueur.nom.lower():
joueur = t_joueur
break
if joueur is None:
self.pere.joueur << "|err|Le joueur passé en paramètre n'a pu " \
"être trouvé.|ff|"
return
if joueur is self.pere.joueur:
self.pere.joueur << "|err|Vous ne pouvez vous bannir vous-même.|ff|"
return
if joueur in canal.moderateurs or joueur is canal.auteur:
self.pere.joueur << "|err|Vous ne pouvez éjecter ce joueur.|ff|"
return
canal.bannir(joueur)
    def opt_announce(self, arguments):
        """Option to send an announcement: /a <message>"""
canal = self.canal
if not self.pere.joueur in canal.moderateurs and \
self.pere.joueur is not canal.auteur and not \
self.pere.joueur.est_immortel():
self.pere.joueur << "|err|Vous n'avez pas accès à cette option.|ff|"
return
message = arguments.rstrip(" \n")
canal.envoyer_imp(message)
    def opt_promote(self, arguments):
        """Option to promote (or demote) a connected player: /p <player>"""
canal = self.canal
if self.pere.joueur is not canal.auteur and not \
self.pere.joueur.est_immortel():
self.pere.joueur << "|err|Vous n'avez pas accès à cette option.|ff|"
return
nom_joueur = arguments.split(" ")[0]
joueur = None
for connecte in canal.connectes:
if nom_joueur == connecte.nom.lower():
joueur = connecte
break
if joueur is None:
self.pere.joueur << "|err|Ce joueur n'est pas connecté au " \
"canal.|ff|"
return
if joueur is self.pere.joueur:
self.pere.joueur << "|err|Vous ne pouvez vous promouvoir " \
"vous-même.|ff|"
return
if joueur is canal.auteur:
self.pere.joueur << "|err|Ce joueur est déjà administrateur.|ff|"
return
canal.promouvoir_ou_dechoir(joueur)
    def opt_edit(self, arguments):
        """Option opening the channel editor"""
canal = self.canal
if self.pere.joueur is not canal.auteur and not \
self.pere.joueur.est_immortel():
self.pere.joueur << "|err|Vous n'avez pas accès à cette option.|ff|"
return
editeur = type(self).importeur.interpreteur.construire_editeur(
"chedit", self.pere.joueur, canal)
self.pere.joueur.contextes.ajouter(editeur)
editeur.actualiser()
    def opt_dissolve(self, arguments):
        """Option to dissolve the channel"""
canal = self.canal
if self.pere.joueur is not canal.auteur and not \
self.pere.joueur.est_immortel():
self.pere.joueur << "|err|Vous n'avez pas accès à cette option.|ff|"
return
joueur = self.pere.joueur
canal.immerger_ou_sortir(joueur, False)
canal.rejoindre_ou_quitter(joueur, False)
joueur << "|err|Le canal {} a été dissous.|ff|".format(canal.nom)
canal.dissoudre()
    def interpreter(self, msg):
        """Interpretation method of the context"""
if msg.startswith("/"):
            # This is an option
            # Extract the option name
mots = msg.split(" ")
option = mots[0][1:]
arguments = " ".join(mots[1:])
if option not in self.options.keys():
self.pere << "|err|Option invalide ({}).|ff|".format(option)
            else: # Call the function corresponding to the option
fonction = self.options[option]
fonction(arguments)
else:
self.canal.envoyer(self.pere.joueur, msg)
| bsd-3-clause | 2,451,683,924,665,189,000 | 41.317507 | 80 | 0.563775 | false | 3.261148 | false | false | false |
jstasiak/pykka-injector | setup.py | 1 | 1150 | from os.path import abspath, dirname, join
from setuptools import setup
PROJECT_ROOT = abspath(dirname(__file__))
long_description = open(join(PROJECT_ROOT, 'README.rst')).read()
description = (
'Pykka (actor model implementation) and Injector '
'(dependency injection framework) integration module'
)
module_code = open(join(PROJECT_ROOT, 'pykka_injector.py')).readlines()
line = [line for line in module_code if line.startswith('__version__ = ')][0]
version = line.split('=')[-1].strip().strip("'")
if __name__ == '__main__':
setup(
name='pykka-injector',
url='http://github.com/jstasiak/pykka-injector',
download_url='http://pypi.python.org/pypi/pykka-injector',
version=version,
description=description,
long_description=long_description,
license='MIT',
platforms=['any'],
py_modules=['pykka_injector'],
author='Jakub Stasiak',
author_email='jakub@stasiak.at',
install_requires=[
'setuptools >= 0.6b1',
'pykka',
'injector',
],
keywords='Dependency Injection,Injector,Pykka',
)
| mit | -1,447,326,377,970,167,000 | 30.944444 | 77 | 0.618261 | false | 3.59375 | false | false | false |
LxMLS/lxmls-toolkit | labs/scripts/non_linear_classifiers/exercise_2.py | 1 | 3897 |
# coding: utf-8
# ### Amazon Sentiment Data
# In[ ]:
import numpy as np
import lxmls.readers.sentiment_reader as srs
from lxmls.deep_learning.utils import AmazonData
corpus = srs.SentimentCorpus("books")
data = AmazonData(corpus=corpus)
# ### Exercise 2.2 Implement Backpropagation for an MLP in Numpy and train it
# Instantiate the feed-forward model class and optimization parameters. This model follows the architecture described in Algorithm 10.
# In[ ]:
# Model
geometry = [corpus.nr_features, 20, 2]
activation_functions = ['sigmoid', 'softmax']
# Optimization
learning_rate = 0.05
num_epochs = 10
batch_size = 30
# In[ ]:
from lxmls.deep_learning.numpy_models.mlp import NumpyMLP
model = NumpyMLP(
geometry=geometry,
activation_functions=activation_functions,
learning_rate=learning_rate
)
# #### Milestone 1:
# Open the code for this model. This is located in
#
# lxmls/deep_learning/numpy_models/mlp.py
#
# Implement the method `backpropagation()` in the class `NumpyMLP` using the Backpropagation recursion that we just saw.
#
# As a first step, focus on getting the gradients of each layer, one at a time. Use the plotting code further below to inspect the loss values for the weight under study and for perturbed versions of it; a generic reference sketch of the backpropagation recursion is given first.
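# For reference, here is a minimal, self-contained sketch of the backward
# recursion for a sigmoid hidden layer followed by a softmax output trained
# with cross-entropy. The parameter layout (separate W1, b1, W2, b2 arrays) is
# an assumption for illustration only; it is not the NumpyMLP implementation.
# In[ ]:
def backprop_sketch(x, y_one_hot, W1, b1, W2, b2):
    # Forward pass
    z1 = x.dot(W1) + b1                                      # (batch, hidden)
    h1 = 1.0 / (1.0 + np.exp(-z1))                           # sigmoid hidden layer
    z2 = h1.dot(W2) + b2                                     # (batch, classes)
    z2 = z2 - z2.max(axis=1, keepdims=True)                  # numerical stability
    p = np.exp(z2) / np.exp(z2).sum(axis=1, keepdims=True)   # softmax probabilities
    # Backward pass: softmax + cross-entropy yields a simple output error
    delta2 = (p - y_one_hot) / x.shape[0]                    # d(loss)/d(z2)
    grad_W2 = h1.T.dot(delta2)
    grad_b2 = delta2.sum(axis=0)
    delta1 = delta2.dot(W2.T) * h1 * (1.0 - h1)              # chain rule through the sigmoid
    grad_W1 = x.T.dot(delta1)
    grad_b1 = delta1.sum(axis=0)
    return [grad_W1, grad_b1, grad_W2, grad_b2]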
# In[ ]:
from lxmls.deep_learning.mlp import get_mlp_parameter_handlers, get_mlp_loss_range
# Get functions to get and set values of a particular weight of the model
get_parameter, set_parameter = get_mlp_parameter_handlers(
layer_index=1,
is_bias=False,
row=0,
column=0
)
# Get batch of data
batch = data.batches('train', batch_size=batch_size)[0]
# Get loss and weight value
current_loss = model.cross_entropy_loss(batch['input'], batch['output'])
current_weight = get_parameter(model.parameters)
# Get range of values of the weight and loss around current parameters values
weight_range, loss_range = get_mlp_loss_range(model, get_parameter, set_parameter, batch)
# Once you have implemented at least the gradient of the last layer, you can start checking whether the values match
# In[ ]:
# Get the gradient value for that weight
gradients = model.backpropagation(batch['input'], batch['output'])
current_gradient = get_parameter(gradients)
# Now you can plot the values of the loss around a given parameter value versus the gradient. If you have implemented this correctly, the gradient should be tangent to the loss at the current weight value, see Figure 3.5. Once you have completed the exercise, you should also be able to plot the gradients of the other layers. Take into account that the gradients for the first layer will only be non-zero for the indices of words present in the batch. You can locate these indices using:
# In[ ]:
# Use this to know the non-zero values of the input (that have non-zero gradient)
batch['input'][0].nonzero()
# Copy the following code for plotting
# In[ ]:
import matplotlib.pyplot as plt
# Plot empirical
plt.plot(weight_range, loss_range)
plt.plot(current_weight, current_loss, 'xr')
plt.ylabel('loss value')
plt.xlabel('weight value')
# Plot real
h = plt.plot(
weight_range,
current_gradient*(weight_range - current_weight) + current_loss,
'r--'
)
plt.show()
# #### Milestone 2:
# After you have ensured that your Backpropagation algorithm is correct, you can train a model with the data we have.
# In[ ]:
# Get batch iterators for train and test
train_batches = data.batches('train', batch_size=batch_size)
test_set = data.batches('test', batch_size=None)[0]
# Epoch loop
for epoch in range(num_epochs):
# Batch loop
for batch in train_batches:
model.update(input=batch['input'], output=batch['output'])
# Prediction for this epoch
hat_y = model.predict(input=test_set['input'])
# Evaluation
accuracy = 100*np.mean(hat_y == test_set['output'])
# Inform user
print("Epoch %d: accuracy %2.2f %%" % (epoch+1, accuracy))
| mit | -5,639,140,880,735,986,000 | 26.835714 | 479 | 0.725173 | false | 3.529891 | false | false | false |
itdxer/neupy | examples/cnn/imagenet_tools.py | 1 | 3761 | from __future__ import division
import os
import requests
from tqdm import tqdm
import numpy as np
from imageio import imread
from skimage import transform
from neupy.utils import asfloat
CURRENT_DIR = os.path.abspath(os.path.dirname(__file__))
FILES_DIR = os.path.join(CURRENT_DIR, 'files')
IMAGENET_CLASSES_FILE = os.path.join(FILES_DIR, 'imagenet_classes.txt')
def download_file(url, filepath, description=''):
head_response = requests.head(url)
filesize = int(head_response.headers['content-length'])
response = requests.get(url, stream=True)
chunk_size = int(1e7)
n_iter = (filesize // chunk_size) + 1
print(description)
print('URL: {}'.format(url))
with open(filepath, "wb") as handle:
for data in tqdm(response.iter_content(chunk_size), total=n_iter):
handle.write(data)
print('Downloaded sucessfully')
def read_image(image_name, image_size=None, crop_size=None):
image = imread(image_name, pilmode='RGB')
if image_size is not None:
height, width, _ = image.shape
new_height, new_width = image_size
if height < width:
# Since width is bigger than height, this scaler
# factor will say by how much it bigger
# New width dimension will be scaled in the way
# that output image will have proportional width and
# height compae to it's original size
proportion_scaler = width / height
image_size = (new_height, int(new_width * proportion_scaler))
else:
proportion_scaler = height / width
image_size = (int(new_height * proportion_scaler), new_width)
image = transform.resize(
image, image_size,
preserve_range=True,
mode='constant')
if crop_size is not None:
height, width, _ = image.shape
height_slice = slice(
(height - crop_size[0]) // 2,
(height + crop_size[0]) // 2)
width_slice = slice(
(width - crop_size[1]) // 2,
(width + crop_size[1]) // 2)
image = image[height_slice, width_slice, :]
# (height, width, channel) -> (1, height, width, channel)
image = np.expand_dims(image, axis=0)
return asfloat(image)
def process(image, use_bgr):
# Per channel normalization
image[:, :, :, 0] -= 123.68
image[:, :, :, 1] -= 116.78
image[:, :, :, 2] -= 103.94
if use_bgr:
# RGB -> BGR
image[:, :, :, (0, 1, 2)] = image[:, :, :, (2, 1, 0)]
return image
def load_image(image_name, image_size=None, crop_size=None, use_bgr=True):
image = read_image(image_name, image_size, crop_size)
return process(image, use_bgr)
def deprocess(image):
image = image.copy()
# BGR -> RGB
image[:, :, (0, 1, 2)] = image[:, :, (2, 1, 0)]
image[:, :, 0] += 123.68
image[:, :, 1] += 116.78
image[:, :, 2] += 103.94
return image.astype(int)
def top_n(probs, n=5):
if probs.ndim == 2:
probs = probs[0] # take probabilities for first image
with open(IMAGENET_CLASSES_FILE, 'r') as f:
class_names = f.read().splitlines()
class_names = np.array(class_names)
max_probs_indices = probs.argsort()[-n:][::-1]
class_probs = probs[max_probs_indices]
top_classes = class_names[max_probs_indices]
return top_classes, class_probs
def print_top_n(probs, n=5):
top_classes, class_probs = top_n(probs, n)
print('-----------------------')
print('Top-{} predicted classes'.format(n))
print('-----------------------')
for top_class, class_prob in zip(top_classes, class_probs):
print("{:<80s}: {:.2%}".format(top_class, class_prob))
print('-----------------------')
| mit | -8,328,753,512,743,601,000 | 27.067164 | 74 | 0.581494 | false | 3.466359 | false | false | false |
pitivi/gst-editing-services | bindings/python/examples/material.py | 1 | 1281 | from gi.repository import Gst, GES, GLib
import os
class Simple:
def __init__(self, uri):
timeline = GES.Timeline()
trackv = GES.Track.video_raw_new()
self.layer = GES.Layer()
self.pipeline = GES.TimelinePipeline()
self.pipeline.add_timeline(timeline)
timeline.add_track(trackv)
timeline.add_layer(self.layer)
GES.Asset.new_async(GES.UriClip, uri, None, self.discoveredCb, None)
self.loop = GLib.MainLoop()
self.loop.run()
def discoveredCb(self, asset, result, blop):
self.layer.add_asset(asset, long(0), long(0), long(10 * Gst.SECOND), 1.0, GES.TrackType.VIDEO)
self.start()
def busMessageCb(self, bus, message, udata):
if message.type == Gst.MessageType.EOS:
print "EOS"
self.loop.quit()
if message.type == Gst.MessageType.ERROR:
print "ERROR"
self.loop.quit()
def start(self):
self.pipeline.set_state(Gst.State.PLAYING)
self.pipeline.get_bus().add_watch(GLib.PRIORITY_DEFAULT, self.busMessageCb, None)
if __name__ == "__main__":
if len(os.sys.argv) < 2:
print "You must specify a file URI"
exit(-1)
GES.init()
# And try!
Simple(os.sys.argv[1])
| lgpl-2.1 | -3,613,576,516,863,848,000 | 28.113636 | 102 | 0.596409 | false | 3.276215 | false | false | false |
ESultanik/lenticrypt | lenticrypt/iowrapper.py | 1 | 4442 | import collections.abc
import gzip
import sys
from io import BufferedReader, BytesIO, IOBase
from typing import BinaryIO, IO, Iterable, Union
IOWrappable = Union[bytes, bytearray, BinaryIO, Iterable[int]]
def get_length(stream: IO) -> int:
"""Gets the number of bytes in the stream."""
old_position = stream.tell()
stream.seek(0)
length = 0
try:
while True:
r = stream.read(1024)
if not r:
break
length += len(r)
finally:
stream.seek(old_position)
return length
class IOWrapper(collections.abc.Sequence):
def __init__(self, wrapped: IOWrappable):
self.wrapped = wrapped
self._file = None
def new_instance(self):
if self.wrapped == '-':
return sys.stdin
elif isinstance(self.wrapped, IOWrapper):
return self.wrapped.new_instance()
elif isinstance(self.wrapped, IOBase):
return self.wrapped
elif isinstance(self.wrapped, collections.abc.Iterable):
if not isinstance(self.wrapped, bytes) and not isinstance(self.wrapped, bytearray):
return BytesIO(bytes([b for b in self.wrapped]))
else:
return BytesIO(self.wrapped)
else:
return open(self.wrapped, 'rb')
def __len__(self):
if isinstance(self.wrapped, collections.abc.Sized):
return len(self.wrapped)
else:
with self.new_instance() as f:
return get_length(f)
def __getitem__(self, index: Union[slice, int]) -> Union[int, bytes]:
if isinstance(self.wrapped, collections.abc.Sequence):
return self.wrapped[index]
else:
with self.new_instance() as f:
old_position = f.tell()
try:
if isinstance(index, slice):
if index.start is None:
index = slice(0, index.stop, index.step)
if index.stop is None:
index = slice(index.start, len(self), index.step)
if index.step is None or index.step == 1:
f.seek(index.start)
return f.read(index.stop - index.start)
else:
ret = bytearray()
for i in range(index.start, index.stop, index.step):
f.seek(i)
r = f.read(1)
if r is None or len(r) < 1:
break
ret.append(r)
return bytes(ret)
else:
r = f.read(1)
if r is None or len(r) < 1:
return None
else:
return r[0]
finally:
f.seek(old_position)
def __enter__(self):
f = self.new_instance()
if f is not self.wrapped:
self._file = f
return f.__enter__()
def __exit__(self, type, value, tb):
if self._file is not None:
self._file.__exit__(type, value, tb)
self._file = None
class GzipIOWrapper(IOWrapper):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
def new_instance(self):
return gzip.GzipFile(fileobj=super().new_instance())
GZIP_MAGIC = b'\x1F\x8B'
class AutoUnzippingStream:
def __init__(self, stream: IOWrappable):
self.__stream = stream
self.__to_close = None
def __enter__(self):
if self.__to_close is not None:
raise Exception(f"{self!r} is already a context manager")
stream = IOWrapper(self.__stream)
reader = BufferedReader(stream.__enter__())
to_close = [reader]
if reader.peek(len(GZIP_MAGIC)) == GZIP_MAGIC:
ret = GzipIOWrapper(reader)
to_close.append(ret)
ret = ret.__enter__()
else:
ret = reader
self.__to_close = (stream,) + tuple(to_close)
return ret
def __exit__(self, *args, **kwargs):
try:
for stream in self.__to_close:
stream.__exit__(*args, **kwargs)
finally:
self.__to_close = None
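# Illustrative usage sketch (not part of the original module): IOWrapper gives
# Sequence-style access over any wrappable source, and AutoUnzippingStream yields
# a plain binary stream whether or not the source is gzip-compressed.
if __name__ == "__main__":
    # Wrap raw bytes and use len()/slicing as on a normal sequence.
    wrapped = IOWrapper(b"hello world")
    print(len(wrapped), wrapped[0:5])
    # Transparently decompress only if the source starts with the gzip magic bytes.
    with AutoUnzippingStream(b"plain, not gzipped") as stream:
        print(stream.read())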
| gpl-2.0 | -7,377,706,488,560,450,000 | 31.903704 | 95 | 0.493246 | false | 4.367748 | false | false | false |
reuk/wayverb | demo/evaluation/receivers/cardioid.py | 2 | 1909 | #!/usr/local/bin/python
import numpy as np
import matplotlib
render = True
if render:
matplotlib.use('pgf')
import matplotlib.pyplot as plt
from matplotlib.ticker import MaxNLocator # added
import matplotlib.mlab as mlab
from string import split
import scipy.signal as signal
import pysndfile
import math
import os
import re
import json
def main():
fig, (ax0, ax1) = plt.subplots(nrows=2, sharex=True, sharey=True)
cmap = plt.get_cmap('viridis')
def plt_file(ax, file_name, name):
sndfile = pysndfile.PySndfile(file_name, 'r')
if sndfile.channels() != 1:
raise RuntimeError('please only load mono files')
Fs = sndfile.samplerate()
signal = sndfile.read_frames()
time = np.arange(len(signal)) / float(Fs)
ax.plot(time, signal)
ax.text(0.001, 0.75, name)
ax1.set_xlabel('time / s')
ax1.set_xlim([0, 0.05])
fig.subplots_adjust(hspace=0)
plt.setp([a.get_xticklabels() for a in fig.axes[:-1]], visible=False)
times = [
3.0 / 340.0,
5.0 / 340.0,
11.0 / 340.0]
for ax in fig.axes:
ax.set_ylabel('amplitude')
for t in times:
ax.axvline(t, linestyle='dotted', color='red')
plt_file(ax0, 'away.wav', 'away')
plt_file(ax1, 'toward.wav', 'toward')
ax1.yaxis.set_major_locator(MaxNLocator(prune='upper')) # added
plt.suptitle('Early Response for Cardoid Receivers Pointing Toward and Away from Source')
#plt.tight_layout()
#plt.subplots_adjust(top=0.9)
plt.show()
if render:
plt.savefig('cardioid.svg', bbox_inches='tight', dpi=96, format='svg')
if __name__ == '__main__':
pgf_with_rc_fonts = {
'font.family': 'serif',
'font.serif': [],
'font.sans-serif': ['Helvetica Neue'],
'legend.fontsize': 12,
}
matplotlib.rcParams.update(pgf_with_rc_fonts)
main()
| gpl-2.0 | 7,487,160,876,632,411,000 | 24.118421 | 93 | 0.614982 | false | 3.094003 | false | false | false |
sheepslinky/franklin | server/control.py | 1 | 2018 | #!/usr/bin/python3
# control.py - USB hotplug handling for Franklin
# Copyright 2014-2016 Michigan Technological University
# Copyright 2016 Bas Wijnen <wijnen@debian.org>
# Author: Bas Wijnen <wijnen@debian.org>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import websocketd
import os
import sys
port = 8000
tls = False
user = None
password = None
current_level = 0
def credentials(level, value):
	global current_level, user, password
if level < current_level:
return
current_level = level
if ':' in value:
user, password = value.split(':', 1)
else:
user = 'admin'
password = value
with open('/etc/default/franklin') as f:
for l in f.readlines():
l = l.strip()
if l == '' or l.startswith('#') or not '=' in l:
continue
key, value = l.split('=', 1)
if key == 'PORT':
# Leave it as a string because it need not be numerical.
port = value.strip()
if key == 'TLS':
tls = value.lower().strip() in ('1', 'true')
if key == 'USER':
credentials(0, value.strip())
if key == 'EXPERT':
credentials(1, value.strip())
if key == 'ADMIN':
credentials(2, value.strip())
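# For illustration only (not part of the original script): the parsing above
# expects /etc/default/franklin to contain KEY=VALUE lines such as:
#
#   PORT=8000
#   TLS=true
#   USER=user:password
#   EXPERT=expert:password
#   ADMIN=admin:password
#
# where USER/EXPERT/ADMIN each accept either "name:password" or a bare password
# (in which case the name defaults to "admin").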
try:
p = websocketd.RPC(port, tls = tls, url = '/admin', user = user, password = password)
action = os.getenv('ACTION')
dev = os.getenv('DEVNAME')
if action == 'add':
p.add_port(dev)
elif action == 'remove':
p.remove_port(dev)
except:
sys.stderr.write('Failed to handle serial port event for Franklin')
| agpl-3.0 | 871,346,251,859,248,100 | 28.676471 | 86 | 0.69227 | false | 3.270665 | false | false | false |
Zamme/bge-editor | bgee_entity.py | 1 | 8653 | # ##### BEGIN GPL LICENSE BLOCK #####
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# ##### END GPL LICENSE BLOCK #####
import bpy
from . import bgee_config, bgee_component, bgee_tagslayers
class GameEditorEntityPanel(bpy.types.Panel):
bl_idname = "bgee_entity_panel"
bl_label = "Entity"
bl_space_type = 'VIEW_3D'
bl_region_type = 'TOOLS'
bl_category = "Entity"
def draw(self, context):
gm = context.blend_data.objects["GameManager"]
layout = self.layout
# Selected entities names, location, rotation, scale
row = layout.row(align=True)
row.label(text="Entities selected:")
row = layout.row(align=True)
box = row.box()
for ob in context.selected_objects:
boxrow = box.row(align=True)
boxrow.label(text=ob.name)
boxrow.prop(ob.entityProps, "active")
boxrow = box.row(align=True)
boxrow.prop(ob.entityProps, "prefab")
boxrow = box.row(align=True)
if (len(context.selected_objects) > 1):
# Check tag equals
pass
else:
boxrow.label("Tag:")
boxrow.prop(context.active_object, "bgeeTag")
boxrow = box.row(align=True)
boxrow.label("Layers:")
boxcolumn = boxrow.column(align=True)
for layId,lay in enumerate(context.active_object.entityProps.layers):
boxrowcolumn = boxcolumn.row(align=True)
boxrowcolumn.label(lay.first)
if (lay.first != "None"):
remParam = boxrowcolumn.operator("bgee.remove_entity_layer", icon="X")
remParam.selectedLayer = layId
remParam.selectedEntity = context.active_object.name
boxcolumn.separator()
boxrowcolumn = boxcolumn.row(align=True)
boxrowcolumn.prop(gm, "bgeeLayer")
addOp = boxrowcolumn.operator("bgee.add_entity_layer", "Add")
addOp.selectedEntity = ob.name
addOp.selectedLayer = gm.bgeeLayer
row = layout.row(align=True)
row.prop(gm.entityTransform, "location")
row = layout.row(align=True)
row.prop(gm.entityTransform, "rotation")
row = layout.row(align=True)
row.prop(gm.entityTransform, "scale")
# START Multiselection transform methods
def update_location(self, context):
gm = context.blend_data.objects["GameManager"]
obs = context.selected_objects
for ob in obs:
ob.location = gm.entityTransform.location
def update_rotation(self, context):
gm = context.blend_data.objects["GameManager"]
obs = context.selected_objects
for ob in obs:
ob.rotation_euler = gm.entityTransform.rotation
def update_scale(self, context):
gm = context.blend_data.objects["GameManager"]
obs = context.selected_objects
for ob in obs:
ob.scale = gm.entityTransform.scale
def update_transform(context): # TODO: NOT WORKING WELL
gm = context.blend_data.objects["GameManager"]
obs = context.selected_objects
if (len(obs) > 0):
locationX, locationY, locationZ, rotationX, rotationY, rotationZ, scaleX, scaleY, scaleZ = True, True, True, True, True, True, True, True, True
# Location
for ob in obs:
if (obs[0].location.x != ob.location.x):
locationX = False
if (locationX):
gm.entityTransform.location[0] = obs[0].location[0]
else:
print("No object selected")
# END Multiselection transform methods
class MultiEntityTransform(bpy.types.PropertyGroup):
location = bpy.props.FloatVectorProperty(update=update_location)
rotation = bpy.props.FloatVectorProperty(subtype="EULER", update=update_rotation)
scale = bpy.props.FloatVectorProperty(update=update_scale)
class BGEE_OT_multiselection(bpy.types.Operator):
bl_idname = "bgee.multiselection"
bl_label = "Entity multiselection catcher"
_updating = False
_calcs_done = False
_timer = None
nObjects = None
def selected_objects_changed(self, context):
currentNObjects = len(context.selected_objects)
if (self.nObjects is not None):
if (self.nObjects != currentNObjects):
self.nObjects = currentNObjects
return True
else:
return False
else:
self.nObjects = currentNObjects
return False
#self._calcs_done = True
def modal(self, context, event):
if event.type == 'TIMER' and not self._updating:
self._updating = True
if (self.selected_objects_changed(context)):
update_transform(context)
self._updating = False
if self._calcs_done:
self.cancel(context)
return {'PASS_THROUGH'}
def execute(self, context):
context.window_manager.modal_handler_add(self)
self._updating = False
self._timer = context.window_manager.event_timer_add(0.2, context.window)
return {'RUNNING_MODAL'}
def cancel(self, context):
context.window_manager.event_timer_remove(self._timer)
self._timer = None
return {'CANCELLED'}
# ENTITY PROPERTIES
class EntityProperties(bpy.types.PropertyGroup):
active = bpy.props.BoolProperty(default=True, name="Active")
prefab = bpy.props.BoolProperty(default=False, name="Prefab")
#tag = bpy.props.EnumProperty(items=bgee_config.bgeeCurrentTags)
layers = bpy.props.CollectionProperty(type=bgee_tagslayers.LayerItem)
components = bpy.props.CollectionProperty(type=bgee_component.ObjectComponent)
class BGEE_OT_add_entity_layer(bpy.types.Operator):
bl_idname = "bgee.add_entity_layer"
bl_label = "Add Entity Layer"
selectedLayer = bpy.props.StringProperty()
selectedEntity = bpy.props.StringProperty()
def execute(self, context):
entity = bpy.data.objects[self.selectedEntity]
# If same layer in dont add
same = False
for lay in entity.entityProps.layers:
if (self.selectedLayer == lay.first):
same = True
if (not same):
# If None is present
if (len(entity.entityProps.layers) > 0):
if (entity.entityProps.layers[0].first == "None"):
entity.entityProps.layers.remove(0)
# If None is selected
if (self.selectedLayer == "None"):
while (len(entity.entityProps.layers) > 0):
entity.entityProps.layers.remove(0)
addedLayer = entity.entityProps.layers.add()
addedLayer.first, addedLayer.second, addedLayer.third = self.selectedLayer, self.selectedLayer, self.selectedLayer
return {'FINISHED'}
class BGEE_OT_remove_entity_layer(bpy.types.Operator):
bl_idname = "bgee.remove_entity_layer"
bl_label = ""
selectedLayer = bpy.props.IntProperty()
selectedEntity = bpy.props.StringProperty()
def execute(self, context):
entity = bpy.data.objects[self.selectedEntity]
entity.entityProps.layers.remove(self.selectedLayer)
if (len(entity.entityProps.layers) < 1):
noneLayer = entity.entityProps.layers.add()
noneLayer.first, noneLayer.second, noneLayer.third = "None", "None", "None"
return {'FINISHED'}
''' COMING SOON
class EntityList(bpy.types.UIList):
def draw_item(self, context, layout, data, item, icon, active_data, active_propname, index):
ob = data
slot = item
ma = slot.material
if self.layout_type in {'DEFAULT', 'COMPACT'}:
layout.label(ob.name)
elif self.layout_type in {'GRID'}:
layout.alignment = 'CENTER'
layout.label("", icon_value=icon)
'''
| lgpl-3.0 | -5,057,835,655,556,257,000 | 37.977477 | 151 | 0.624061 | false | 3.888989 | false | false | false |
Alberto-Beralix/Beralix | i386-squashfs-root/usr/share/system-config-printer/troubleshoot/QueueNotEnabled.py | 1 | 2871 | #!/usr/bin/python
## Printing troubleshooter
## Copyright (C) 2008, 2009 Red Hat, Inc.
## Copyright (C) 2008, 2009 Tim Waugh <twaugh@redhat.com>
## This program is free software; you can redistribute it and/or modify
## it under the terms of the GNU General Public License as published by
## the Free Software Foundation; either version 2 of the License, or
## (at your option) any later version.
## This program is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
## GNU General Public License for more details.
## You should have received a copy of the GNU General Public License
## along with this program; if not, write to the Free Software
## Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
import cups
from base import *
class QueueNotEnabled(Question):
def __init__ (self, troubleshooter):
Question.__init__ (self, troubleshooter, "Queue not enabled?")
self.label = gtk.Label ()
solution = gtk.VBox ()
self.label.set_line_wrap (True)
self.label.set_alignment (0, 0)
solution.pack_start (self.label, False, False, 0)
solution.set_border_width (12)
troubleshooter.new_page (solution, self)
def display (self):
answers = self.troubleshooter.answers
if not answers['cups_queue_listed']:
return False
if answers['is_cups_class']:
queue = answers['cups_class_dict']
else:
queue = answers['cups_printer_dict']
enabled = queue['printer-state'] != cups.IPP_PRINTER_STOPPED
if enabled:
return False
if answers['cups_printer_remote']:
attrs = answers['remote_cups_queue_attributes']
reason = attrs['printer-state-message']
else:
reason = queue['printer-state-message']
if reason:
reason = _("The reason given is: '%s'.") % reason
else:
reason = _("This may be due to the printer being disconnected or "
"switched off.")
text = ('<span weight="bold" size="larger">' +
_("Queue Not Enabled") + '</span>\n\n' +
_("The queue '%s' is not enabled.") %
answers['cups_queue'])
if reason:
text += ' ' + reason
if not answers['cups_printer_remote']:
text += '\n\n'
text += _("To enable it, select the 'Enabled' checkbox in the "
"'Policies' tab for the printer in the printer "
"administration tool.")
text += ' ' + _(TEXT_start_print_admin_tool)
self.label.set_markup (text)
return True
def can_click_forward (self):
return False
| gpl-3.0 | -4,737,863,344,954,821,000 | 34.8875 | 82 | 0.599443 | false | 4.083926 | false | false | false |
tiagofrepereira2012/examples.tensorflow | examples/tensorflow/DataShuffler.py | 1 | 6754 | #!/usr/bin/env python
# vim: set fileencoding=utf-8 :
# @author: Tiago de Freitas Pereira <tiago.pereira@idiap.ch>
# @date: Wed 11 May 2016 09:39:36 CEST
import numpy
def scale_mean_norm(data, scale=0.00390625):
mean = numpy.mean(data)
data = (data - mean) * scale
return data, mean
"""
Data
"""
class DataShuffler(object):
def __init__(self, data, labels, perc_train=0.9, scale=True):
"""
Some base functions for neural networks
**Parameters**
data:
"""
scale_value = 0.00390625
total_samples = data.shape[0]
indexes = numpy.array(range(total_samples))
numpy.random.shuffle(indexes)
# Spliting train and validation
train_samples = int(round(total_samples * perc_train))
validation_samples = total_samples - train_samples
data = numpy.reshape(data, (data.shape[0], 28, 28, 1))
self.train_data = data[indexes[0:train_samples], :, :, :]
self.train_labels = labels[indexes[0:train_samples]]
self.validation_data = data[indexes[train_samples:train_samples + validation_samples], :, :, :]
self.validation_labels = labels[indexes[train_samples:train_samples + validation_samples]]
self.total_labels = 10
if scale:
# data = scale_minmax_norm(data,lower_bound = -1, upper_bound = 1)
self.train_data, self.mean = scale_mean_norm(self.train_data)
self.validation_data = (self.validation_data - self.mean) * scale_value
def get_batch(self, n_samples, train_dataset=True):
if train_dataset:
data = self.train_data
label = self.train_labels
else:
data = self.validation_data
label = self.validation_labels
# Shuffling samples
indexes = numpy.array(range(data.shape[0]))
numpy.random.shuffle(indexes)
selected_data = data[indexes[0:n_samples], :, :, :]
selected_labels = label[indexes[0:n_samples]]
return selected_data.astype("float32"), selected_labels
def get_pair(self, n_pair=1, is_target_set_train=True, zero_one_labels=True):
"""
Get a random pair of samples
**Parameters**
is_target_set_train: Defining the target set to get the batch
**Return**
"""
def get_genuine_or_not(input_data, input_labels, genuine=True):
if genuine:
# TODO: THIS KEY SELECTION NEEDS TO BE MORE EFFICIENT
# Getting a client
index = numpy.random.randint(self.total_labels)
# Getting the indexes of the data from a particular client
indexes = numpy.where(input_labels == index)[0]
numpy.random.shuffle(indexes)
# Picking a pair
data = input_data[indexes[0], :, :, :]
data_p = input_data[indexes[1], :, :, :]
else:
# Picking a pair from different clients
index = numpy.random.choice(self.total_labels, 2, replace=False)
# Getting the indexes of the two clients
indexes = numpy.where(input_labels == index[0])[0]
indexes_p = numpy.where(input_labels == index[1])[0]
numpy.random.shuffle(indexes)
numpy.random.shuffle(indexes_p)
# Picking a pair
data = input_data[indexes[0], :, :, :]
data_p = input_data[indexes_p[0], :, :, :]
return data, data_p
if is_target_set_train:
target_data = self.train_data
target_labels = self.train_labels
else:
target_data = self.validation_data
target_labels = self.validation_labels
total_data = n_pair * 2
c = target_data.shape[3]
w = target_data.shape[1]
h = target_data.shape[2]
data = numpy.zeros(shape=(total_data, w, h, c), dtype='float32')
data_p = numpy.zeros(shape=(total_data, w, h, c), dtype='float32')
labels_siamese = numpy.zeros(shape=total_data, dtype='float32')
genuine = True
for i in range(total_data):
data[i, :, :, :], data_p[i, :, :, :] = get_genuine_or_not(target_data, target_labels, genuine=genuine)
if zero_one_labels:
labels_siamese[i] = not genuine
else:
labels_siamese[i] = -1 if genuine else +1
genuine = not genuine
return data, data_p, labels_siamese
def get_triplet(self, n_labels, n_triplets=1, is_target_set_train=True):
"""
Get a triplet
**Parameters**
is_target_set_train: Defining the target set to get the batch
**Return**
"""
def get_one_triplet(input_data, input_labels):
# Getting a pair of clients
index = numpy.random.choice(n_labels, 2, replace=False)
label_positive = index[0]
label_negative = index[1]
# Getting the indexes of the data from a particular client
indexes = numpy.where(input_labels == index[0])[0]
numpy.random.shuffle(indexes)
# Picking a positive pair
data_anchor = input_data[indexes[0], :, :, :]
data_positive = input_data[indexes[1], :, :, :]
# Picking a negative sample
indexes = numpy.where(input_labels == index[1])[0]
numpy.random.shuffle(indexes)
data_negative = input_data[indexes[0], :, :, :]
return data_anchor, data_positive, data_negative, label_positive, label_positive, label_negative
if is_target_set_train:
target_data = self.train_data
target_labels = self.train_labels
else:
target_data = self.validation_data
target_labels = self.validation_labels
c = target_data.shape[3]
w = target_data.shape[1]
h = target_data.shape[2]
data_a = numpy.zeros(shape=(n_triplets, w, h, c), dtype='float32')
data_p = numpy.zeros(shape=(n_triplets, w, h, c), dtype='float32')
data_n = numpy.zeros(shape=(n_triplets, w, h, c), dtype='float32')
labels_a = numpy.zeros(shape=n_triplets, dtype='float32')
labels_p = numpy.zeros(shape=n_triplets, dtype='float32')
labels_n = numpy.zeros(shape=n_triplets, dtype='float32')
for i in range(n_triplets):
data_a[i, :, :, :], data_p[i, :, :, :], data_n[i, :, :, :], \
labels_a[i], labels_p[i], labels_n[i] = \
get_one_triplet(target_data, target_labels)
return data_a, data_p, data_n, labels_a, labels_p, labels_n
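# Illustrative usage sketch (not part of the original module); the random arrays
# below merely mimic MNIST-shaped input (N x 784 flattened 28x28 images).
if __name__ == "__main__":
    fake_data = numpy.random.rand(1000, 784)
    fake_labels = numpy.random.randint(0, 10, size=1000)
    shuffler = DataShuffler(fake_data, fake_labels, perc_train=0.9)
    batch, batch_labels = shuffler.get_batch(16)
    left, right, siamese_labels = shuffler.get_pair(n_pair=8)
    anchors, positives, negatives, la, lp, ln = shuffler.get_triplet(
        n_labels=10, n_triplets=8)
    print(batch.shape, left.shape, anchors.shape)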
| gpl-3.0 | 7,439,942,784,333,194,000 | 33.635897 | 114 | 0.56485 | false | 3.670652 | false | false | false |
luca-heltai/ePICURE | applications/arclength_example.py | 1 | 4351 | import numpy as np
import math
import scipy.special as sp
from scipy.interpolate import lagrange
from numpy.polynomial.chebyshev import chebgauss
#import sys
from utilities import *
from interfaces import *
#from utilities.arclength import*
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
R = 1
P = 1
intervals=9
vs_order=2
n = (intervals*(vs_order)+1-1)
print (n)
ii = np.linspace(0,2,n+1)
#ii = [(n+1) * np.cos(i * np.pi / (n + 1)) for i in range(n+1)]
control_points_3d = np.asarray(np.zeros([n+1,2,3]))#[np.array([R*np.cos(5*i * np.pi / (n + 1)), R*np.sin(5*i * np.pi / (n + 1)), P * i]) for i in range(0, n+1)]
control_points_3d[:,0,0] = np.array([R*np.cos(5*i * np.pi / (n + 1))for i in ii])
control_points_3d[:,0,1] = np.array([R*np.sin(5*i * np.pi / (n + 1))for i in ii])
control_points_3d[:,0,2] = np.array([P*i for i in range(n+1)])
control_points_3d[:,1,0] = np.array([R*np.cos(5*i * np.pi / (n + 1))for i in ii])
control_points_3d[:,1,1] = np.array([R*np.sin(5*i * np.pi / (n + 1))for i in ii])
control_points_3d[:,1,2] = np.array([2*P*i for i in range(n+1)])
#print control_points_3d[0]
vsl = IteratedVectorSpace(UniformLagrangeVectorSpace(vs_order+1), np.linspace(0,1,intervals+1))
print (vsl.n_dofs)
#vsl = AffineVectorSpace(UniformLagrangeVectorSpace(n+1),1,5)
#BSpline parameters
n = 17
p = 3
# Number of least square points
n_ls = 140
# Open knot vector
knots = np.zeros(n+2*p)
knots[p:-p] = np.linspace(0,1,n)
knots[0:p] = 0
knots[-p::] = 1
#vsl = BsplineVectorSpace(p, knots)
#print (vsl.n_dofs)
arky = ArcLengthParametrizer(vsl, control_points_3d)
new_control_points_3d = arky.reparametrize()
new_arky = ArcLengthParametrizer(vsl, new_control_points_3d)
new_arky.reparametrize()
plt.plot(arky.points_s[:,0],arky.points_s[:,1],label='original')
plt.plot(new_arky.points_s[:,0],new_arky.points_s[:,1],label='reparametrized')
plt.legend()
plt.savefig('new_arclength.png')
plt.close()
plt.close()
print (np.amax(np.abs(control_points_3d - new_control_points_3d )))
#print (np.squeeze(new_control_points_3d[:,0,:]))
tt = np.linspace(0, 1, 128)
tt4 = 4 * tt + 1
#print (tt4)
vals_1 = vsl.element(np.squeeze(control_points_3d[:,0,:]))(tt)
vals_2 = vsl.element(np.squeeze(control_points_3d[:,1,:]))(tt)
new_vals_1 = vsl.element(np.squeeze(new_control_points_3d[:,0,:]))(tt)
new_vals_2 = vsl.element(np.squeeze(new_control_points_3d[:,1,:]))(tt)
#print (vsl.element(np.squeeze(control_points_3d[:,1,:]))(tt4) == arky.curve(tt4)), #vals_1
#print (vals.shape, new_vals.shape)
x = np.squeeze(np.array(vals_1[0,:]))
y = np.squeeze(np.array(vals_1[1,:]))
z = np.squeeze(np.array(vals_1[2,:]))
#print (x.shape)
new_x = np.squeeze(np.array(new_vals_1[0,:]))
new_y = np.squeeze(np.array(new_vals_1[1,:]))
new_z = np.squeeze(np.array(new_vals_1[2,:]))
#print (new_x.shape, x.shape, np.amax(np.abs(vals-new_vals),0))
#print (control_points_3d[3])
#print (x,y,z)
fig = plt.figure()
ax = fig.gca(projection='3d')
ax.plot(x, y, z,'r', label='test_curve')
ax.plot(np.squeeze(np.array(control_points_3d[:,0,0])),np.squeeze(np.array(control_points_3d[:,0,1])),np.squeeze(np.array(control_points_3d[:,0,2])),'r*-',label='orig_cp')
ax.plot(new_x, new_y, new_z,'g', label='new_test_curve')
ax.plot(np.squeeze(np.array(new_control_points_3d[:,0,0])),np.squeeze(np.array(new_control_points_3d[:,0,1])),np.squeeze(np.array(new_control_points_3d[:,0,2])),'g*-',label='new_cp')
ax.legend()
plt.savefig('test_curve_1.png')
plt.close()
plt.close()
#print (vals_1.shape, new_vals_1.shape)
x = np.squeeze(np.array(vals_2[0,:]))
y = np.squeeze(np.array(vals_2[1,:]))
z = np.squeeze(np.array(vals_2[2,:]))
new_x = np.squeeze(np.array(new_vals_2[0,:]))
new_y = np.squeeze(np.array(new_vals_2[1,:]))
new_z = np.squeeze(np.array(new_vals_2[2,:]))
fig = plt.figure()
#print (new_x)
ax = fig.gca(projection='3d')
ax.plot(x, y, z,'r', label='test_curve')
ax.plot(np.squeeze(np.array(control_points_3d[:,1,0])),np.squeeze(np.array(control_points_3d[:,1,1])),np.squeeze(np.array(control_points_3d[:,1,2])),'r*-',label='orig_cp')
ax.plot(new_x, new_y, new_z,'g', label='new_test_curve')
ax.plot(np.squeeze(np.array(new_control_points_3d[:,1,0])),np.squeeze(np.array(new_control_points_3d[:,1,1])),np.squeeze(np.array(new_control_points_3d[:,1,2])),'g*-',label='new_cp')
ax.legend()
plt.savefig('test_curve_2.png')
plt.close()
plt.close()
| gpl-2.0 | 6,500,625,509,890,941,000 | 36.834783 | 182 | 0.664675 | false | 2.28399 | true | false | false |
iulian787/spack | var/spack/repos/builtin/packages/blaze/package.py | 2 | 3063 | # Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class Blaze(CMakePackage):
"""Blaze is an open-source, high-performance C++ math library for dense and
sparse arithmetic. With its state-of-the-art Smart Expression Template
implementation Blaze combines the elegance and ease of use of a
domain-specific language with HPC-grade performance, making it one of the
most intuitive and fastest C++ math libraries available.
"""
homepage = "https://bitbucket.org/blaze-lib/blaze/overview"
url = "https://bitbucket.org/blaze-lib/blaze/downloads/blaze-3.8.tar.gz"
git = "https://bitbucket.org/blaze-lib/blaze.git"
# Blaze requires at least cmake 3.8.0 for C++14 features.
depends_on('cmake@3.8.0:', type='build')
depends_on('blas')
version('master', branch='master')
version('3.8', sha256='dfaae1a3a9fea0b3cc92e78c9858dcc6c93301d59f67de5d388a3a41c8a629ae')
version('3.7', sha256='ef3cbc5db7d62dcdde0af88d3c951051254afd750d26773406fddb6afc5ad890')
version('3.6', sha256='2ebbadacaf3f066e27352c1e413ead127b7ced8a3b202ae45f39c8f5f12324cc')
version('3.5', sha256='f50d4a57796b8012d3e6d416667d9abe6f4d95994eb9deb86cd4491381dec624')
version('3.4', sha256='fd474ab479e81d31edf27d4a529706b418f874caa7b046c67489128c20dda66f')
version('3.3', sha256='138cbb7b95775c10bf56a5ab3596a32205751299b19699984b6ed55b1bf989d0')
version('3.2', sha256='fb7e83d3a8c1ba04d3a51234708092b75a1abf3b7c4d0db5e6cf3cbed771b869')
version('3.1', sha256='a122d6758d9ada7ab516417f7b5ad186a4a9b390bba682f009df6585f5550716')
version('3.0', sha256='d66abaf4633d60b6e6472f6ecd7db7b4fb5f74a4afcfdf00c92e1ea61f2e0870')
version('2.6', sha256='a6b927db14b43fad483670dfa2acd7ecc94fd53085cdf18f262d2dc613857fb6')
version('2.5', sha256='5faeca8a26e04f70a5b3f94e88ef1fbe96a89e3722cd89e5f9d4bc8267b33d41')
version('2.4', sha256='34af70c8bb4da5fd0017b7c47e5efbfef9aadbabc5aae416582901a4059d1fa3')
version('2.3', sha256='785089db7f15684c24018b931f9f564954a79389166ac1f3e256a56c667d49f2')
version('2.2', sha256='448e70a440d71afa6325bae254ca7367b10e61431084adbf2ac679dbd5da78d2')
version('2.1', sha256='b982c03236c6a7ae396850eba0ef8fb1642ddf6448531063bf7239d9ff3290fd')
version('2.0', sha256='7bdf555e97455a2f42f40396b32caa9cf3e52bdd1877e0289115825113f4dcb2')
version('1.5', sha256='5c69b605b712616dcd29fa25abecb20b977ef318207ef96176ab67b2ad891e1e')
version('1.4', sha256='2e48d2e5a3a06abb23716829501bb0b825c58ad156faab6df0cfeef1bcdfbc82')
version('1.3', sha256='361bfbf2d2bf8557d123da3af8abc70e4c3b13d9c94a8227aeb751e06acdb8cf')
version('1.2', sha256='16f56d4f61dca229fa7e17a0d1e348a1f3246c65cded2df5db33babebf8f9b9d')
version('1.1', sha256='6add20eb9c176ea9f8091c49b101f46d1a1a6bd9c31553a6eff5e53603f0527f')
version('1.0', sha256='ee13cfd467c1a4b0fe7cc58b61b846eae862167a90dd2e60559626a30418b5a3')
| lgpl-2.1 | 5,871,476,873,201,262,000 | 62.8125 | 93 | 0.79889 | false | 2.242313 | false | false | false |
hackerspace-ntnu/website | website/migrations/0010_auto_20210322_1840.py | 1 | 1453 | # Generated by Django 3.1.2 on 2021-03-22 18:40
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('website', '0009_auto_20210322_1837'),
]
operations = [
migrations.AlterField(
model_name='banner',
name='color',
field=models.CharField(default='hs-yellow', help_text='Bakgrunnsfargen til banneret som en hex-farge. hs-green, hs-yellow og hs-red støttes også som presets.', max_length=10, verbose_name='bannercolor'),
),
migrations.AlterField(
model_name='banner',
name='site',
field=models.CharField(default='*', help_text="Det interne navnet på URL-stien til sidene som banneret skal dukke opp på. Wildcard (*) støttes. F.eks. er '*' ALLE sider, 'inventory:*' er alle lagersider.", max_length=250, verbose_name='bannersider'),
),
migrations.AlterField(
model_name='banner',
name='text',
field=models.TextField(default='Sample Text', help_text='Tekst som vises i banneret.', max_length=1000, verbose_name='bannertext'),
),
migrations.AlterField(
model_name='banner',
name='text_color',
field=models.CharField(default='hs-black', help_text='Tekstfargen på banneret. hs-white og hs-black støttes som presets.', max_length=10, verbose_name='bannertextcolor'),
),
]
| mit | -8,948,652,533,114,494,000 | 42.818182 | 262 | 0.625173 | false | 3.535452 | false | false | false |
kamcpp/tensorflow | tensorflow/contrib/bayesflow/python/kernel_tests/stochastic_graph_test.py | 10 | 9224 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for stochastic graphs."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
st = tf.contrib.bayesflow.stochastic_tensor
sg = tf.contrib.bayesflow.stochastic_graph
distributions = tf.contrib.distributions
class NormalNotParam(distributions.Normal):
@property
def is_reparameterized(self):
return False
class TestSurrogateLosses(tf.test.TestCase):
def testPathwiseDerivativeDoesNotAddSurrogateLosses(self):
with self.test_session():
mu = [0.0, 0.1, 0.2]
sigma = tf.constant([1.1, 1.2, 1.3])
with st.value_type(st.SampleAndReshapeValue()):
prior = st.StochasticTensor(distributions.Normal, mu=mu, sigma=sigma)
likelihood = st.StochasticTensor(
distributions.Normal, mu=prior, sigma=sigma)
self.assertTrue(prior.distribution.is_reparameterized)
self.assertTrue(likelihood.distribution.is_reparameterized)
loss = tf.square(tf.identity(likelihood) - [0.0, 0.1, 0.2])
sum_loss = tf.reduce_sum(loss)
surrogate_loss = sg.surrogate_loss([loss])
with self.assertRaisesRegexp(ValueError, "dimensionality 1 or greater"):
_ = sg.surrogate_loss([sum_loss])
surrogate_from_both = sg.surrogate_loss(
[loss, sum_loss * tf.ones_like(loss)])
# Pathwise derivative terms do not require add'l surrogate loss terms.
with self.test_session() as sess:
self.assertAllClose(*sess.run([loss, surrogate_loss]))
self.assertAllClose(*sess.run([(loss + sum_loss), surrogate_from_both]))
def _testSurrogateLoss(self, session, losses, expected_addl_terms, xs):
surrogate_loss = sg.surrogate_loss(losses)
expected_surrogate_loss = tf.add_n(losses + expected_addl_terms)
self.assertAllClose(*session.run([surrogate_loss, expected_surrogate_loss]))
# Test backprop
expected_grads = tf.gradients(ys=expected_surrogate_loss, xs=xs)
surrogate_grads = tf.gradients(ys=surrogate_loss, xs=xs)
self.assertEqual(len(expected_grads), len(surrogate_grads))
grad_values = session.run(expected_grads + surrogate_grads)
n_grad = len(expected_grads)
self.assertAllClose(grad_values[:n_grad], grad_values[n_grad:])
def testSurrogateLoss(self):
with self.test_session() as sess:
mu = tf.constant([0.0, 0.1, 0.2])
sigma = tf.constant([1.1, 1.2, 1.3])
with st.value_type(st.SampleAndReshapeValue()):
prior = st.StochasticTensor(NormalNotParam, mu=mu, sigma=sigma)
likelihood = st.StochasticTensor(
NormalNotParam, mu=prior, sigma=sigma)
prior_2 = st.StochasticTensor(NormalNotParam, mu=mu, sigma=sigma)
loss = tf.square(tf.identity(likelihood) - mu)
part_loss = tf.square(tf.identity(prior) - mu)
sum_loss = tf.reduce_sum(loss)
loss_nodeps = tf.square(tf.identity(prior_2) - mu)
# For ground truth, use the stop-gradient versions of the losses
loss_nograd = tf.stop_gradient(loss)
loss_nodeps_nograd = tf.stop_gradient(loss_nodeps)
sum_loss_nograd = tf.stop_gradient(sum_loss)
# These score functions should ignore prior_2
self._testSurrogateLoss(
session=sess,
losses=[loss],
expected_addl_terms=[
likelihood.distribution.log_pdf(likelihood.value()) * loss_nograd,
prior.distribution.log_pdf(prior.value()) * loss_nograd],
xs=[mu, sigma])
self._testSurrogateLoss(
session=sess,
losses=[loss, part_loss],
expected_addl_terms=[
likelihood.distribution.log_pdf(likelihood.value()) * loss_nograd,
(prior.distribution.log_pdf(prior.value())
* tf.stop_gradient(part_loss + loss))],
xs=[mu, sigma])
self._testSurrogateLoss(
session=sess,
losses=[sum_loss * tf.ones_like(loss)],
expected_addl_terms=[
(likelihood.distribution.log_pdf(likelihood.value())
* sum_loss_nograd),
prior.distribution.log_pdf(prior.value()) * sum_loss_nograd],
xs=[mu, sigma])
self._testSurrogateLoss(
session=sess,
losses=[loss, sum_loss * tf.ones_like(loss)],
expected_addl_terms=[
(likelihood.distribution.log_pdf(likelihood.value())
* tf.stop_gradient(loss + sum_loss)),
(prior.distribution.log_pdf(prior.value())
* tf.stop_gradient(loss + sum_loss))],
xs=[mu, sigma])
# These score functions should ignore prior and likelihood
self._testSurrogateLoss(
session=sess,
losses=[loss_nodeps],
expected_addl_terms=[(prior_2.distribution.log_pdf(prior_2.value())
* loss_nodeps_nograd)],
xs=[mu, sigma])
# These score functions should include all terms selectively
self._testSurrogateLoss(
session=sess,
losses=[loss, loss_nodeps],
# We can't guarantee ordering of output losses in this case.
expected_addl_terms=[
(likelihood.distribution.log_pdf(likelihood.value())
* loss_nograd),
prior.distribution.log_pdf(prior.value()) * loss_nograd,
(prior_2.distribution.log_pdf(prior_2.value())
* loss_nodeps_nograd)],
xs=[mu, sigma])
def testNoSurrogateLoss(self):
with self.test_session():
mu = tf.constant([0.0, 0.1, 0.2])
sigma = tf.constant([1.1, 1.2, 1.3])
with st.value_type(st.SampleAndReshapeValue()):
dt = st.StochasticTensor(NormalNotParam,
mu=mu,
sigma=sigma,
loss_fn=None)
self.assertEqual(None, dt.loss(tf.constant([2.0])))
def testExplicitStochasticTensors(self):
with self.test_session() as sess:
mu = tf.constant([0.0, 0.1, 0.2])
sigma = tf.constant([1.1, 1.2, 1.3])
with st.value_type(st.SampleAndReshapeValue()):
dt1 = st.StochasticTensor(NormalNotParam, mu=mu, sigma=sigma)
dt2 = st.StochasticTensor(NormalNotParam, mu=mu, sigma=sigma)
loss = tf.square(tf.identity(dt1)) + 10. + dt2
sl_all = sg.surrogate_loss([loss])
sl_dt1 = sg.surrogate_loss([loss], stochastic_tensors=[dt1])
sl_dt2 = sg.surrogate_loss([loss], stochastic_tensors=[dt2])
dt1_term = dt1.distribution.log_pdf(dt1) * loss
dt2_term = dt2.distribution.log_pdf(dt2) * loss
self.assertAllClose(*sess.run(
[sl_all, sum([loss, dt1_term, dt2_term])]))
self.assertAllClose(*sess.run([sl_dt1, sum([loss, dt1_term])]))
self.assertAllClose(*sess.run([sl_dt2, sum([loss, dt2_term])]))
class StochasticDependenciesMapTest(tf.test.TestCase):
def testBuildsMapOfUpstreamNodes(self):
dt1 = st.StochasticTensor(distributions.Normal, mu=0., sigma=1.)
dt2 = st.StochasticTensor(distributions.Normal, mu=0., sigma=1.)
out1 = dt1.value() + 1.
out2 = dt2.value() + 2.
x = out1 + out2
y = out2 * 3.
dep_map = sg._stochastic_dependencies_map([x, y])
self.assertEqual(dep_map[dt1], set([x]))
self.assertEqual(dep_map[dt2], set([x, y]))
def testHandlesStackedStochasticNodes(self):
dt1 = st.StochasticTensor(distributions.Normal, mu=0., sigma=1.)
out1 = dt1.value() + 1.
dt2 = st.StochasticTensor(distributions.Normal, mu=out1, sigma=1.)
x = dt2.value() + 2.
dt3 = st.StochasticTensor(distributions.Normal, mu=0., sigma=1.)
y = dt3.value() * 3.
dep_map = sg._stochastic_dependencies_map([x, y])
self.assertEqual(dep_map[dt1], set([x]))
self.assertEqual(dep_map[dt2], set([x]))
self.assertEqual(dep_map[dt3], set([y]))
def testTraversesControlInputs(self):
dt1 = st.StochasticTensor(distributions.Normal, mu=0., sigma=1.)
logits = dt1.value() * 3.
dt2 = st.StochasticTensor(distributions.Bernoulli, logits=logits)
dt3 = st.StochasticTensor(distributions.Normal, mu=0., sigma=1.)
x = dt3.value()
y = tf.ones((2, 2)) * 4.
z = tf.ones((2, 2)) * 3.
out = tf.cond(
tf.cast(dt2, tf.bool), lambda: tf.add(x, y), lambda: tf.square(z))
out += 5.
dep_map = sg._stochastic_dependencies_map([out])
self.assertEqual(dep_map[dt1], set([out]))
self.assertEqual(dep_map[dt2], set([out]))
self.assertEqual(dep_map[dt3], set([out]))
if __name__ == "__main__":
tf.test.main()
| apache-2.0 | -1,007,608,493,723,966,100 | 39.279476 | 80 | 0.629445 | false | 3.499241 | true | false | false |
ForeverWintr/metafunctions | setup.py | 1 | 2340 | '''
MetaFunctions is a function composition and data pipelining library.
For more information, please visit the `project on github <https://github.com/ForeverWintr/metafunctions>`_.
'''
import os
import sys
import contextlib
import pathlib
import shutil
from setuptools import setup, find_packages, Command
import metafunctions
here = os.path.abspath(os.path.dirname(__file__))
class UploadCommand(Command):
"""
Support setup.py upload.
https://github.com/kennethreitz/setup.py/blob/master/setup.py
"""
description = 'Build and publish the package.'
user_options = []
@staticmethod
def status(s):
"""Prints things in bold."""
print('\033[1m{0}\033[0m'.format(s))
def initialize_options(self):
pass
def finalize_options(self):
pass
def run(self):
try:
self.status('Removing previous builds…')
shutil.rmtree(os.path.join(here, 'dist'))
except OSError:
pass
self.status('Building Source and Wheel (universal) distribution…')
os.system('{0} setup.py sdist bdist_wheel --universal'.format(sys.executable))
self.status('Uploading the package to PyPi via Twine…')
os.system('twine upload dist/*')
sys.exit()
setup(
name=metafunctions.__name__,
version=metafunctions.__version__,
description='Metafunctions is a function composition and data pipelining library',
long_description=__doc__,
url='https://github.com/ForeverWintr/metafunctions',
author='Tom Rutherford',
author_email='foreverwintr@gmail.com',
license='MIT',
# See https://pypi.python.org/pypi?%3Aaction=list_classifiers
classifiers=[
'Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'Topic :: Software Development :: Libraries :: Python Modules',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
],
keywords='functional-programming function-composition',
packages=find_packages(),
test_suite='metafunctions.tests',
install_requires='ansicolors>=1.1.8',
# $ setup.py publish support.
cmdclass={
'upload': UploadCommand,
},
)
| mit | -1,384,161,527,418,276,900 | 27.814815 | 108 | 0.647815 | false | 4.038062 | false | false | false |
allancaffee/scaly-mongo | tests/acceptance/test_find.py | 1 | 7354 | from scalymongo import Document
from tests.acceptance.base_acceptance_test import BaseAcceptanceTest
class FindExample(Document):
structure = {
'name': basestring,
'age': int,
}
indexes = [{
'fields': [('name', 1)],
}]
__database__ = 'test'
__collection__ = __file__
class BaseFindTest(BaseAcceptanceTest):
@classmethod
def setup_class(cls):
BaseAcceptanceTest.setup_class()
cls.connection.models.FindExample.collection.drop()
cls.docs = [
{'name': 'Alice', 'age': 32},
{'name': 'Bob', 'age': 32},
{'name': 'Carl', 'age': 41},
{'name': 'Donna', 'age': 35},
]
cls.docs = [cls.connection.models.FindExample(doc)
for doc in cls.docs]
for doc in cls.docs:
doc.save()
cls.connection.models.FindExample.ensure_indexes()
@classmethod
def teardown_class(cls):
super(BaseFindTest, cls).teardown_class()
cls.connection.models.FindExample.collection.drop()
class PropertyReturnsScalyMongoDocuments(object):
def should_return_only_find_example_instances(self):
for returned_doc in self.returned_docs:
assert isinstance(returned_doc, FindExample)
class WhenFindingByAge(BaseFindTest):
@classmethod
def setup_class(cls):
BaseFindTest.setup_class()
cls.returned = cls.connection.models.FindExample.find({'age': 32})
cls.returned_docs = list(cls.returned)
def should_find_alice_and_bob(self):
assert self.returned_docs == self.docs[:2]
def should_return_2_results(self):
assert self.returned.count() == 2
class WhenFindingWithoutArgs(BaseFindTest, PropertyReturnsScalyMongoDocuments):
@classmethod
def setup_class(cls):
BaseFindTest.setup_class()
cls.returned = cls.connection.models.FindExample.find()
cls.returned_docs = list(cls.returned)
def should_find_all(self):
assert self.returned_docs == self.docs
class WhenFindingWithoutArgsOnRewoundCursor(BaseFindTest):
@classmethod
def setup_class(cls):
BaseFindTest.setup_class()
cls.returned = cls.connection.models.FindExample.find()
cls.first_returned_docs = list(cls.returned)
cls.returned = cls.returned.rewind()
cls.second_returned_docs = list(cls.returned)
def should_find_all(self):
assert self.first_returned_docs == self.docs
assert self.second_returned_docs == self.docs
def should_return_find_example_instances(self):
for doc in self.first_returned_docs:
assert isinstance(doc, FindExample)
for doc in self.second_returned_docs:
assert isinstance(doc, FindExample)
class WhenFindingWithoutArgsOnClonedCursor(BaseFindTest):
@classmethod
def setup_class(cls):
BaseFindTest.setup_class()
cls.returned = cls.connection.models.FindExample.find()
cls.first_returned_docs = list(cls.returned)
cls.returned = cls.returned.clone()
cls.second_returned_docs = list(cls.returned)
def should_find_all(self):
assert self.first_returned_docs == self.docs
assert self.second_returned_docs == self.docs
def should_return_find_example_instances(self):
for doc in self.first_returned_docs:
assert isinstance(doc, FindExample)
for doc in self.second_returned_docs:
assert isinstance(doc, FindExample)
class WhenNoDocumentsMatch(BaseFindTest):
@classmethod
def setup_class(cls):
BaseFindTest.setup_class()
cls.returned = cls.connection.models.FindExample.find(
{'name': 'John'})
cls.returned_docs = list(cls.returned)
def should_return_0_results(self):
assert self.returned.count() == 0
class WhenFindingWithSkip(BaseFindTest, PropertyReturnsScalyMongoDocuments):
@classmethod
def setup_class(cls):
BaseFindTest.setup_class()
cls.returned = cls.connection.models.FindExample.find().skip(1)
cls.returned_docs = list(cls.returned)
def should_return_Bob_and_Carl(self):
assert self.returned_docs == self.docs[1:]
class WhenFindingWithLimit(BaseFindTest, PropertyReturnsScalyMongoDocuments):
@classmethod
def setup_class(cls):
BaseFindTest.setup_class()
cls.returned = cls.connection.models.FindExample.find().limit(1)
cls.returned_docs = list(cls.returned)
def should_return_only_first(self):
assert self.returned_docs == [self.docs[0]]
class WhenSortingByNameInverted(BaseFindTest, PropertyReturnsScalyMongoDocuments):
@classmethod
def setup_class(cls):
BaseFindTest.setup_class()
cls.returned = cls.connection.models.FindExample.find().sort(
[('name', -1)])
cls.returned_docs = list(cls.returned)
def should_return_4_results(self):
assert self.returned.count() == 4
def should_return_Donna_Carl_Bob_and_Alice(self):
assert self.returned_docs[0] == self.docs[-1]
assert self.returned_docs[1] == self.docs[-2]
assert self.returned_docs[2] == self.docs[-3]
assert self.returned_docs[3] == self.docs[-4]
class WhenFilteringWithAWhereClause(BaseFindTest, PropertyReturnsScalyMongoDocuments):
@classmethod
def setup_class(cls):
BaseFindTest.setup_class()
cls.returned = cls.connection.models.FindExample.find().where(
'this.age>35')
cls.returned_docs = list(cls.returned)
def should_return_1_result(self):
assert self.returned.count() == 1
def should_return_Carl(self):
assert self.returned_docs == [self.docs[2]]
class WhenGettingASlice(BaseFindTest):
@classmethod
def setup_class(cls):
BaseFindTest.setup_class()
cls.returned = cls.connection.models.FindExample.find()[1:2]
cls.returned_docs = list(cls.returned)
def should_return_Bob_and_Carl(self):
assert self.returned_docs == self.docs[1:2]
def should_return_1_result(self):
assert self.returned.count(True) == 1
class WhenFindingAge32WithMaxScanOf1(
BaseFindTest, PropertyReturnsScalyMongoDocuments):
@classmethod
def setup_class(cls):
BaseFindTest.setup_class()
cls.returned = cls.connection.models.FindExample.find(
{'age': 32}).max_scan(1)
cls.returned_docs = list(cls.returned)
def should_return_only_Alice(self):
assert self.returned_docs == [self.docs[0]]
class WhenFindingAllWithHint(BaseFindTest, PropertyReturnsScalyMongoDocuments):
@classmethod
def setup_class(cls):
BaseFindTest.setup_class()
cls.returned = cls.connection.models.FindExample.find().hint(
[('name', 1)])
cls.returned_docs = list(cls.returned)
def should_find_all(self):
assert self.returned_docs == self.docs
class WhenFindingAllWithBatchSize(BaseFindTest, PropertyReturnsScalyMongoDocuments):
@classmethod
def setup_class(cls):
BaseFindTest.setup_class()
cls.returned = cls.connection.models.FindExample.find().batch_size(5)
cls.returned_docs = list(cls.returned)
def should_find_all(self):
assert self.returned_docs == self.docs
| bsd-3-clause | -104,822,185,336,807,870 | 27.614786 | 86 | 0.657057 | false | 3.777093 | true | false | false |
tcpcloud/python-heatclient | heatclient/tests/unit/test_template_format.py | 3 | 1628 | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
import testscenarios
import testtools
import yaml
from heatclient.common import template_format
load_tests = testscenarios.load_tests_apply_scenarios
class YamlParseExceptions(testtools.TestCase):
scenarios = [
('scanner', dict(raised_exception=yaml.scanner.ScannerError())),
('parser', dict(raised_exception=yaml.parser.ParserError())),
('reader',
dict(raised_exception=yaml.reader.ReaderError('', '', '', '', ''))),
]
def test_parse_to_value_exception(self):
text = 'not important'
with mock.patch.object(yaml, 'load') as yaml_loader:
yaml_loader.side_effect = self.raised_exception
self.assertRaises(ValueError,
template_format.parse, text)
def test_parse_no_version_format(self):
yaml = ''
self.assertRaises(ValueError, template_format.parse, yaml)
yaml2 = '''Parameters: {}
Mappings: {}
Resources: {}
Outputs: {}
'''
self.assertRaises(ValueError, template_format.parse, yaml2)
| apache-2.0 | 8,902,788,243,648,320,000 | 31.56 | 78 | 0.67199 | false | 4.13198 | true | false | false |
benjamincongdon/adept | enemy.py | 1 | 1072 | import math
import random
import pygame
from buffalo import utils
from npc import NPC
class Enemy(NPC):
# Class for anything hostile. For now it just follows the player around,
# There's not enough in the game in terms of health, damage, and item usage
# To have actual combat
def __init__(self, name=None, fPos=None, **kwargs):
speed = kwargs.get("speed") if kwargs.get("speed") is not None else .05
NPC.__init__(self, name=name, fPos=fPos, speed=speed, spawn=kwargs.get("spawn"))
def update(self, target):
# If it's close enough to the player it won't move
# If it's too far away it will stop trying
if self.fPos[0] != target[0] and math.hypot(self.fPos[1]-target[1], self.fPos[0]-target[0]) > 32 and math.hypot(self.fPos[1]-target[1], self.fPos[0]-target[0]) < 600:
# Some fancy trig to get the direction it needs to go to follow the player
angle = math.atan((self.fPos[1]-target[1])/(self.fPos[0]-target[0]))
if self.fPos[0] - target[0] > 0:
angle = math.pi + angle
self.move(angle)
else:
self.move(None)
NPC.update(self)
| mit | 228,071,826,013,174,430 | 32.5 | 168 | 0.684701 | false | 2.858667 | false | false | false |
Sh4kE/ofm_helper | core/migrations/0004_checklist_checklistitem.py | 2 | 1625 | # -*- coding: utf-8 -*-
# Generated by Django 1.9.8 on 2016-11-28 09:45
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('core', '0003_auto_20161119_0927'),
]
operations = [
migrations.CreateModel(
name='Checklist',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
migrations.CreateModel(
name='ChecklistItem',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=255)),
('to_be_checked_on_matchday', models.IntegerField(blank=True, null=True)),
('to_be_checked_on_matchday_pattern', models.IntegerField(blank=True, null=True)),
('to_be_checked_if_home_match_tomorrow', models.BooleanField(default=False)),
('checklist', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='core.Checklist')),
('last_checked_on_matchday', models.ForeignKey(blank=True, default=None, null=True, on_delete=django.db.models.deletion.CASCADE, to='core.Matchday')),
],
),
]
| agpl-3.0 | 8,514,440,296,711,046,000 | 42.918919 | 166 | 0.619692 | false | 3.934625 | false | false | false |
themiurgo/folium | folium/features.py | 1 | 26933 | # -*- coding: utf-8 -*-
"""
Features
------
Extra features Elements.
"""
from jinja2 import Template
import json
from .utilities import (color_brewer, _parse_size, legend_scaler,
_locations_mirror, _locations_tolist, image_to_url)
from .element import Element, Figure, JavascriptLink, CssLink, MacroElement
from .map import TileLayer, Icon
class WmsTileLayer(TileLayer):
def __init__(self, url, name=None,
format=None, layers=None, transparent=True,
attribution=None):
"""TODO docstring here
Parameters
----------
"""
super(TileLayer, self).__init__()
self._name = 'WmsTileLayer'
self.tile_name = name if name is not None else 'WmsTileLayer_'+self._id
self.url = url
self.format = format
self.layers = layers
self.transparent = transparent
# if attribution is None:
# raise ValueError('WMS must'
# ' also be passed an attribution')
self.attribution = attribution
self._template = Template(u"""
{% macro script(this, kwargs) %}
var {{this.get_name()}} = L.tileLayer.wms(
'{{ this.url }}',
{
format:'{{ this.format }}',
transparent: {{ this.transparent.__str__().lower() }},
layers:'{{ this.layers }}'
{% if this.attribution %}, attribution:'{{this.attribution}}'{% endif %}
}
).addTo({{this._parent.get_name()}});
{% endmacro %}
""")
class RegularPolygonMarker(MacroElement):
def __init__(self, location, popup=None,
color='black', opacity=1, weight=2,
fill_color='blue', fill_opacity=1,
number_of_sides=4, rotation=0, radius=15):
"""TODO : docstring here"""
super(RegularPolygonMarker, self).__init__()
self._name = 'RegularPolygonMarker'
self.location = location
self.color = color
self.opacity = opacity
self.weight = weight
self.fill_color = fill_color
self.fill_opacity = fill_opacity
self.number_of_sides = number_of_sides
self.rotation = rotation
self.radius = radius
if popup is not None:
self.add_children(popup)
self._template = Template(u"""
{% macro script(this, kwargs) %}
var {{this.get_name()}} = new L.RegularPolygonMarker(
new L.LatLng({{this.location[0]}},{{this.location[1]}}),
{
icon : new L.Icon.Default(),
color: '{{this.color}}',
opacity: {{this.opacity}},
weight: {{this.weight}},
fillColor: '{{this.fill_color}}',
fillOpacity: {{this.fill_opacity}},
numberOfSides: {{this.number_of_sides}},
rotation: {{this.rotation}},
radius: {{this.radius}}
}
)
.addTo({{this._parent.get_name()}});
{% endmacro %}
""")
def render(self, **kwargs):
super(RegularPolygonMarker, self).render()
figure = self.get_root()
assert isinstance(figure, Figure), ("You cannot render this Element "
"if it's not in a Figure.")
figure.header.add_children(
JavascriptLink("https://cdnjs.cloudflare.com/ajax/libs/leaflet-dvf/0.2/leaflet-dvf.markers.min.js"),
name='dvf_js')
class Vega(Element):
def __init__(self, data, width='100%', height='100%',
left="0%", top="0%", position='relative'):
"""TODO : docstring here"""
super(Vega, self).__init__()
self._name = 'Vega'
self.data = data
# Size Parameters.
self.width = _parse_size(width)
self.height = _parse_size(height)
self.left = _parse_size(left)
self.top = _parse_size(top)
self.position = position
self._template = Template(u"")
def render(self, **kwargs):
self.json = json.dumps(self.data)
self._parent.html.add_children(Element(Template("""
<div id="{{this.get_name()}}"></div>
""").render(this=self, kwargs=kwargs)), name=self.get_name())
self._parent.script.add_children(Element(Template("""
vega_parse({{this.json}},{{this.get_name()}});
""").render(this=self)), name=self.get_name())
figure = self.get_root()
assert isinstance(figure, Figure), ("You cannot render this Element "
"if it's not in a Figure.")
figure.header.add_children(Element(Template("""
<style> #{{this.get_name()}} {
position : {{this.position}};
width : {{this.width[0]}}{{this.width[1]}};
height: {{this.height[0]}}{{this.height[1]}};
left: {{this.left[0]}}{{this.left[1]}};
top: {{this.top[0]}}{{this.top[1]}};
</style>
""").render(this=self, **kwargs)), name=self.get_name())
figure.header.add_children(
JavascriptLink("https://cdnjs.cloudflare.com/ajax/libs/d3/3.5.5/d3.min.js"),
name='d3')
figure.header.add_children(
JavascriptLink("https://cdnjs.cloudflare.com/ajax/libs/vega/1.4.3/vega.min.js"),
name='vega')
figure.header.add_children(
JavascriptLink("https://code.jquery.com/jquery-2.1.0.min.js"),
name='jquery')
figure.script.add_children(
Template("""function vega_parse(spec, div) {
vg.parse.spec(spec, function(chart) { chart({el:div}).update(); });}"""),
name='vega_parse')
class GeoJson(MacroElement):
def __init__(self, data):
"""Creates a GeoJson plugin to append into a map with
Map.add_plugin.
Parameters
----------
data: file, dict or str.
The geo-json data you want to plot.
If file, then data will be read in the file and fully embedded in Leaflet's javascript.
If dict, then data will be converted to JSON and embedded in the javascript.
If str, then data will be passed to the javascript as-is.
examples :
# providing file
GeoJson(open('foo.json'))
# providing dict
GeoJson(json.load(open('foo.json')))
# providing string
GeoJson(open('foo.json').read())
"""
super(GeoJson, self).__init__()
self._name = 'GeoJson'
if 'read' in dir(data):
self.data = data.read()
elif type(data) is dict:
self.data = json.dumps(data)
else:
self.data = data
self._template = Template(u"""
{% macro script(this, kwargs) %}
var {{this.get_name()}} = L.geoJson({{this.data}}).addTo({{this._parent.get_name()}});
{% endmacro %}
""")
class TopoJson(MacroElement):
def __init__(self, data, object_path):
"""TODO docstring here.
"""
super(TopoJson, self).__init__()
self._name = 'TopoJson'
if 'read' in dir(data):
self.data = data.read()
elif type(data) is dict:
self.data = json.dumps(data)
else:
self.data = data
self.object_path = object_path
self._template = Template(u"""
{% macro script(this, kwargs) %}
var {{this.get_name()}}_data = {{this.data}};
var {{this.get_name()}} = L.geoJson(topojson.feature(
{{this.get_name()}}_data,
{{this.get_name()}}_data.{{this.object_path}}
)).addTo({{this._parent.get_name()}});
{% endmacro %}
""")
def render(self, **kwargs):
super(TopoJson, self).render(**kwargs)
figure = self.get_root()
assert isinstance(figure, Figure), ("You cannot render this Element "
"if it's not in a Figure.")
figure.header.add_children(
JavascriptLink("https://cdnjs.cloudflare.com/ajax/libs/topojson/1.6.9/topojson.min.js"),
name='topojson')
class GeoJsonStyle(MacroElement):
def __init__(self, color_domain, color_code, color_data=None,
key_on='feature.properties.color',
weight=1, opacity=1, color='black',
fill_opacity=0.6, dash_array=0):
"""TODO : docstring here.
"""
super(GeoJsonStyle, self).__init__()
self._name = 'GeoJsonStyle'
self.color_domain = color_domain
self.color_range = color_brewer(color_code, n=len(color_domain))
self.color_data = json.dumps(color_data)
self.key_on = key_on
self.weight = weight
self.opacity = opacity
self.color = color
self.fill_color = color_code
self.fill_opacity = fill_opacity
self.dash_array = dash_array
self._template = Template(u"""
{% macro script(this, kwargs) %}
{% if not this.color_range %}
var {{this.get_name()}} = {
color_function : function(feature) {
return '{{this.fill_color}}';
},
};
{%else%}
var {{this.get_name()}} = {
color_scale : d3.scale.threshold()
.domain({{this.color_domain}})
.range({{this.color_range}}),
color_data : {{this.color_data}},
color_function : function(feature) {
{% if this.color_data=='null' %}
return this.color_scale({{this.key_on}});
{% else %}
                            return this.color_scale(this.color_data[{{this.key_on}}]);
{% endif %}
},
};
{%endif%}
{{this._parent.get_name()}}.setStyle(function(feature) {
return {
fillColor: {{this.get_name()}}.color_function(feature),
weight: {{this.weight}},
opacity: {{this.opacity}},
color: '{{this.color}}',
fillOpacity: {{this.fill_opacity}},
dashArray: '{{this.dash_array}}'
};
});
{% endmacro %}
""")
def render(self, **kwargs):
super(GeoJsonStyle, self).render(**kwargs)
figure = self.get_root()
assert isinstance(figure, Figure), ("You cannot render this Element "
"if it's not in a Figure.")
figure.header.add_children(
JavascriptLink("https://cdnjs.cloudflare.com/ajax/libs/d3/3.5.5/d3.min.js"),
name='d3')
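# Illustrative sketch (not part of the original module): GeoJsonStyle is meant
# to be added as a child of a GeoJson element, since its script calls setStyle
# on its parent. `geojson_layer` and the domain values are hypothetical.
def _geojson_style_usage_sketch(geojson_layer):
    style = GeoJsonStyle(color_domain=[0, 10, 20, 30],
                         color_code='YlGnBu',
                         key_on='feature.properties.value')
    geojson_layer.add_children(style)
    return style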
class ColorScale(MacroElement):
def __init__(self, color_domain, color_code, caption=""):
"""TODO : docstring here.
"""
super(ColorScale, self).__init__()
self._name = 'ColorScale'
self.color_domain = color_domain
self.color_range = color_brewer(color_code, n=len(color_domain))
self.tick_labels = legend_scaler(self.color_domain)
self.caption = caption
self.fill_color = color_code
self._template = self._env.get_template('color_scale.js')
def render(self, **kwargs):
super(ColorScale, self).render(**kwargs)
figure = self.get_root()
assert isinstance(figure, Figure), ("You cannot render this Element "
"if it's not in a Figure.")
figure.header.add_children(
JavascriptLink("https://cdnjs.cloudflare.com/ajax/libs/d3/3.5.5/d3.min.js"),
name='d3')
class MarkerCluster(MacroElement):
"""Adds a MarkerCluster layer on the map."""
def __init__(self):
"""Creates a MarkerCluster element to append into a map with
Map.add_children.
Parameters
----------
"""
super(MarkerCluster, self).__init__()
self._name = 'MarkerCluster'
self._template = Template(u"""
{% macro script(this, kwargs) %}
var {{this.get_name()}} = L.markerClusterGroup();
{{this._parent.get_name()}}.addLayer({{this.get_name()}});
{% endmacro %}
""")
def render(self, **kwargs):
        super(MarkerCluster, self).render(**kwargs)
figure = self.get_root()
assert isinstance(figure, Figure), ("You cannot render this Element "
"if it's not in a Figure.")
figure.header.add_children(
JavascriptLink("https://cdnjs.cloudflare.com/ajax/libs/leaflet.markercluster/0.4.0/leaflet.markercluster-src.js"),
name='marker_cluster_src')
figure.header.add_children(
JavascriptLink("https://cdnjs.cloudflare.com/ajax/libs/leaflet.markercluster/0.4.0/leaflet.markercluster.js"),
name='marker_cluster')
figure.header.add_children(
CssLink("https://cdnjs.cloudflare.com/ajax/libs/leaflet.markercluster/0.4.0/MarkerCluster.css"),
name='marker_cluster_css')
figure.header.add_children(
CssLink("https://cdnjs.cloudflare.com/ajax/libs/leaflet.markercluster/0.4.0/MarkerCluster.Default.css"),
name="marker_cluster_default_css")
class DivIcon(MacroElement):
def __init__(self, width=30, height=30):
"""TODO : docstring here"""
super(DivIcon, self).__init__()
self._name = 'DivIcon'
self.width = width
self.height = height
self._template = Template(u"""
{% macro script(this, kwargs) %}
var {{this.get_name()}} = L.divIcon({
className: 'leaflet-div-icon',
'iconSize': [{{ this.width }},{{ this.height }}]
});
{{this._parent.get_name()}}.setIcon({{this.get_name()}});
{% endmacro %}
""")
class CircleMarker(MacroElement):
def __init__(self, location, radius=500, color='black',
fill_color='black', fill_opacity=0.6, popup=None):
"""TODO : docstring here
"""
super(CircleMarker, self).__init__()
self._name = 'CircleMarker'
self.location = location
self.radius = radius
self.color = color
self.fill_color = fill_color
self.fill_opacity = fill_opacity
if popup is not None:
self.add_children(popup)
self._template = Template(u"""
{% macro script(this, kwargs) %}
var {{this.get_name()}} = L.circle(
[{{this.location[0]}},{{this.location[1]}}],
{{ this.radius }},
{
color: '{{ this.color }}',
fillColor: '{{ this.fill_color }}',
fillOpacity: {{ this.fill_opacity }}
}
)
.addTo({{this._parent.get_name()}});
{% endmacro %}
""")
class LatLngPopup(MacroElement):
def __init__(self):
"""TODO : docstring here
"""
super(LatLngPopup, self).__init__()
self._name = 'LatLngPopup'
self._template = Template(u"""
{% macro script(this, kwargs) %}
var {{this.get_name()}} = L.popup();
function latLngPop(e) {
{{this.get_name()}}
.setLatLng(e.latlng)
.setContent("Latitude: " + e.latlng.lat.toFixed(4) +
"<br>Longitude: " + e.latlng.lng.toFixed(4))
.openOn({{this._parent.get_name()}});
}
{{this._parent.get_name()}}.on('click', latLngPop);
{% endmacro %}
""")
class ClickForMarker(MacroElement):
def __init__(self, popup=None):
"""TODO : docstring here
"""
super(ClickForMarker, self).__init__()
self._name = 'ClickForMarker'
if popup:
self.popup = ''.join(['"', popup, '"'])
else:
self.popup = '"Latitude: " + lat + "<br>Longitude: " + lng '
self._template = Template(u"""
{% macro script(this, kwargs) %}
function newMarker(e){
var new_mark = L.marker().setLatLng(e.latlng).addTo({{this._parent.get_name()}});
new_mark.dragging.enable();
new_mark.on('dblclick', function(e){ {{this._parent.get_name()}}.removeLayer(e.target)})
var lat = e.latlng.lat.toFixed(4),
lng = e.latlng.lng.toFixed(4);
new_mark.bindPopup({{ this.popup }});
};
{{this._parent.get_name()}}.on('click', newMarker);
{% endmacro %}
""")
class PolyLine(MacroElement):
def __init__(self, locations, color=None, weight=None,
opacity=None, latlon=True):
"""Creates a PolyLine object to append into a map with
Map.add_children.
Parameters
----------
locations: list of points (latitude, longitude)
Latitude and Longitude of line (Northing, Easting)
color: string, default Leaflet's default ('#03f')
weight: float, default Leaflet's default (5)
opacity: float, default Leaflet's default (0.5)
latlon: bool, default True
Whether locations are given in the form [[lat, lon]]
or not ([[lon, lat]] if False).
Note that the default GeoJson format is latlon=False,
while Leaflet polyline's default is latlon=True.
"""
super(PolyLine, self).__init__()
self._name = 'PolyLine'
self.data = (_locations_mirror(locations) if not latlon else
_locations_tolist(locations))
self.color = color
self.weight = weight
self.opacity = opacity
self._template = Template(u"""
{% macro script(this, kwargs) %}
var {{this.get_name()}} = L.polyline(
{{this.data}},
{
{% if this.color != None %}color: '{{ this.color }}',{% endif %}
{% if this.weight != None %}weight: {{ this.weight }},{% endif %}
{% if this.opacity != None %}opacity: {{ this.opacity }},{% endif %}
});
{{this._parent.get_name()}}.addLayer({{this.get_name()}});
{% endmacro %}
""")
class MultiPolyLine(MacroElement):
def __init__(self, locations, color=None, weight=None,
opacity=None, latlon=True):
"""Creates a MultiPolyLine object to append into a map with
Map.add_children.
Parameters
----------
locations: list of points (latitude, longitude)
Latitude and Longitude of line (Northing, Easting)
color: string, default Leaflet's default ('#03f')
weight: float, default Leaflet's default (5)
opacity: float, default Leaflet's default (0.5)
latlon: bool, default True
Whether locations are given in the form [[lat, lon]]
or not ([[lon, lat]] if False).
Note that the default GeoJson format is latlon=False,
while Leaflet polyline's default is latlon=True.
"""
super(MultiPolyLine, self).__init__()
self._name = 'MultiPolyLine'
self.data = (_locations_mirror(locations) if not latlon else
_locations_tolist(locations))
self.color = color
self.weight = weight
self.opacity = opacity
self._template = Template(u"""
{% macro script(this, kwargs) %}
var {{this.get_name()}} = L.multiPolyline(
{{this.data}},
{
{% if this.color != None %}color: '{{ this.color }}',{% endif %}
{% if this.weight != None %}weight: {{ this.weight }},{% endif %}
{% if this.opacity != None %}opacity: {{ this.opacity }},{% endif %}
});
{{this._parent.get_name()}}.addLayer({{this.get_name()}});
{% endmacro %}
""")
class ImageOverlay(MacroElement):
def __init__(self, image, bounds, opacity=1., attribution=None,
origin='upper', colormap=None, mercator_project=False):
"""Used to load and display a single image over specific bounds of
the map, implements ILayer interface.
Parameters
----------
image: string, file or array-like object
The data you want to draw on the map.
* If string, it will be written directly in the output file.
            * If file, its content will be converted and embedded in the output file.
* If array-like, it will be converted to PNG base64 string and embedded in the output.
bounds: list
Image bounds on the map in the form [[lat_min, lon_min], [lat_max, lon_max]]
opacity: float, default Leaflet's default (1.0)
        attribution: string, default Leaflet's default ("")
origin : ['upper' | 'lower'], optional, default 'upper'
Place the [0,0] index of the array in the upper left or
lower left corner of the axes.
colormap : callable, used only for `mono` image.
Function of the form [x -> (r,g,b)] or [x -> (r,g,b,a)]
for transforming a mono image into RGB.
It must output iterables of length 3 or 4, with values between 0. and 1.
Hint : you can use colormaps from `matplotlib.cm`.
mercator_project : bool, default False, used only for array-like image.
Transforms the data to project (longitude, latitude)
coordinates to the Mercator projection.
"""
super(ImageOverlay, self).__init__()
self._name = 'ImageOverlay'
self.url = image_to_url(image, origin=origin,
mercator_project=mercator_project,
bounds=bounds)
self.bounds = json.loads(json.dumps(bounds))
options = {
'opacity': opacity,
'attribution': attribution,
}
self.options = json.dumps({key: val for key, val
in options.items() if val},
sort_keys=True)
self._template = Template(u"""
{% macro script(this, kwargs) %}
var {{this.get_name()}} = L.imageOverlay(
'{{ this.url }}',
{{ this.bounds }},
{{ this.options }}
).addTo({{this._parent.get_name()}});
{% endmacro %}
""")
class CustomIcon(Icon):
def __init__(self, icon_image, icon_size=None, icon_anchor=None,
shadow_image=None, shadow_size=None, shadow_anchor=None,
popup_anchor=None):
"""Create a custom icon, based on an image.
Parameters
----------
icon_image : string, file or array-like object
The data you want to use as an icon.
* If string, it will be written directly in the output file.
            * If file, its content will be converted and embedded in the output file.
* If array-like, it will be converted to PNG base64 string and embedded in the output.
icon_size : tuple of 2 int
Size of the icon image in pixels.
icon_anchor : tuple of 2 int
The coordinates of the "tip" of the icon
(relative to its top left corner).
The icon will be aligned so that this point is at the
marker's geographical location.
shadow_image : string, file or array-like object
The data for the shadow image. If not specified,
no shadow image will be created.
shadow_size : tuple of 2 int
Size of the shadow image in pixels.
shadow_anchor : tuple of 2 int
The coordinates of the "tip" of the shadow relative to its
top left corner (the same as icon_anchor if not specified).
popup_anchor : tuple of 2 int
The coordinates of the point from which popups will "open",
relative to the icon anchor.
"""
super(Icon, self).__init__()
self._name = 'CustomIcon'
self.icon_url = image_to_url(icon_image)
self.icon_size = icon_size
self.icon_anchor = icon_anchor
self.shadow_url = (image_to_url(shadow_image)
if shadow_image is not None else None)
self.shadow_size = shadow_size
self.shadow_anchor = shadow_anchor
self.popup_anchor = popup_anchor
self._template = Template(u"""
{% macro script(this, kwargs) %}
var {{this.get_name()}} = L.icon({
iconUrl: '{{this.icon_url}}',
{% if this.icon_size %}iconSize: [{{this.icon_size[0]}},{{this.icon_size[1]}}],{% endif %}
{% if this.icon_anchor %}iconAnchor: [{{this.icon_anchor[0]}},{{this.icon_anchor[1]}}],{% endif %}
{% if this.shadow_url %}shadowUrl: '{{this.shadow_url}}',{% endif %}
{% if this.shadow_size %}shadowSize: [{{this.shadow_size[0]}},{{this.shadow_size[1]}}],{% endif %}
{% if this.shadow_anchor %}shadowAnchor: [{{this.shadow_anchor[0]}},{{this.shadow_anchor[1]}}],{% endif %}
{% if this.popup_anchor %}popupAnchor: [{{this.popup_anchor[0]}},{{this.popup_anchor[1]}}],{% endif %}
});
{{this._parent.get_name()}}.setIcon({{this.get_name()}});
{% endmacro %}
""")
| mit | 4,189,219,772,065,768,400 | 38.665685 | 126 | 0.502246 | false | 4.224122 | false | false | false |
hyperized/ansible | lib/ansible/module_utils/network/nxos/facts/l3_interfaces/l3_interfaces.py | 11 | 4905 | #
# -*- coding: utf-8 -*-
# Copyright 2019 Red Hat
# GNU General Public License v3.0+
# (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
#!/usr/bin/python
"""
The nxos l3_interfaces fact class
It is in this file the configuration is collected from the device
for a given resource, parsed, and the facts tree is populated
based on the configuration.
"""
from __future__ import absolute_import, division, print_function
__metaclass__ = type
import re
from copy import deepcopy
from ansible.module_utils.network.common import utils
from ansible.module_utils.network.nxos.argspec.l3_interfaces.l3_interfaces import L3_interfacesArgs
from ansible.module_utils.network.nxos.utils.utils import get_interface_type, validate_ipv4_addr, validate_ipv6_addr
class L3_interfacesFacts(object):
""" The nxos l3_interfaces fact class
"""
def __init__(self, module, subspec='config', options='options'):
self._module = module
self.argument_spec = L3_interfacesArgs.argument_spec
spec = deepcopy(self.argument_spec)
if subspec:
if options:
facts_argument_spec = spec[subspec][options]
else:
facts_argument_spec = spec[subspec]
else:
facts_argument_spec = spec
self.generated_spec = utils.generate_dict(facts_argument_spec)
def populate_facts(self, connection, ansible_facts, data=None):
""" Populate the facts for l3_interfaces
:param connection: the device connection
:param data: previously collected conf
:rtype: dictionary
:returns: facts
"""
objs = []
if not data:
data = connection.get('show running-config | section ^interface')
config = data.split('interface ')
for conf in config:
conf = conf.strip()
if conf:
obj = self.render_config(self.generated_spec, conf)
if obj and len(obj.keys()) > 1:
objs.append(obj)
ansible_facts['ansible_network_resources'].pop('l3_interfaces', None)
facts = {}
if objs:
facts['l3_interfaces'] = []
params = utils.validate_config(self.argument_spec, {'config': objs})
for cfg in params['config']:
facts['l3_interfaces'].append(utils.remove_empties(cfg))
ansible_facts['ansible_network_resources'].update(facts)
return ansible_facts
def render_config(self, spec, conf):
"""
Render config as dictionary structure and delete keys
from spec for null values
:param spec: The facts tree, generated from the argspec
:param conf: The configuration
:rtype: dictionary
:returns: The generated config
"""
config = deepcopy(spec)
match = re.search(r'^(\S+)', conf)
intf = match.group(1)
if get_interface_type(intf) == 'unknown':
return {}
config['name'] = intf
ipv4_match = re.compile(r'\n ip address (.*)')
matches = ipv4_match.findall(conf)
if matches:
if validate_ipv4_addr(matches[0]):
config['ipv4'] = []
for m in matches:
ipv4_conf = m.split()
addr = ipv4_conf[0]
ipv4_addr = addr if validate_ipv4_addr(addr) else None
if ipv4_addr:
config_dict = {'address': ipv4_addr}
if len(ipv4_conf) > 1:
d = ipv4_conf[1]
if d == 'secondary':
config_dict.update({'secondary': True})
if len(ipv4_conf) == 4:
if ipv4_conf[2] == 'tag':
config_dict.update({'tag': int(ipv4_conf[-1])})
elif d == 'tag':
config_dict.update({'tag': int(ipv4_conf[-1])})
config['ipv4'].append(config_dict)
ipv6_match = re.compile(r'\n ipv6 address (.*)')
matches = ipv6_match.findall(conf)
if matches:
if validate_ipv6_addr(matches[0]):
config['ipv6'] = []
for m in matches:
ipv6_conf = m.split()
addr = ipv6_conf[0]
ipv6_addr = addr if validate_ipv6_addr(addr) else None
if ipv6_addr:
config_dict = {'address': ipv6_addr}
if len(ipv6_conf) > 1:
d = ipv6_conf[1]
if d == 'tag':
config_dict.update({'tag': int(ipv6_conf[-1])})
config['ipv6'].append(config_dict)
return utils.remove_empties(config)
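# Illustrative example (not part of the original module), assuming a running
# config section such as:
#   interface Ethernet1/1
#     ip address 192.168.1.1/24
#     ip address 10.0.0.1/24 secondary
# render_config() would yield a fact dictionary roughly of the form:
#   {'name': 'Ethernet1/1',
#    'ipv4': [{'address': '192.168.1.1/24'},
#             {'address': '10.0.0.1/24', 'secondary': True}]}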
| gpl-3.0 | 8,065,667,181,968,915,000 | 37.928571 | 116 | 0.532314 | false | 4.246753 | true | false | false |
BFriedland/deck_of_cards | deck_of_cards.py | 2 | 3389 | import random
# Some of my assumptions:
# - Decks contain four suits with associated colors
# - Suits contain thirteen title-like designators
# - It doesn't matter if the deck's contents can change in any which way
# as long as the initial deck contains the correct 52 cards.
# - The random library is sufficient for randomizing things, particularly
# when used in the way it is used here.
# - The deck should be shuffled upon creation.
# - deck_of_cards.py will be imported to whatever application users desire
# to use a deck of cards in; the module has no interface but the API.
class Deck:
def __init__(self):
self.all_cards = []
for each_card_index in range(0, 52):
self.all_cards.append(Card(card_index=each_card_index))
self.shuffle()
def shuffle(self):
new_list = []
for each_card_index in range(0, 52):
number_of_cards_remaining = (52 - each_card_index)
            # Unlike range(), random.randint() is doubly end-inclusive,
            # so drawing from 1 through number_of_cards_remaining and
            # subtracting 1 yields an index from 0 through
            # number_of_cards_remaining - 1, inclusive.
            # I chose NOT to roll the -1 in which_card_index_to_take
            # into number_of_cards_remaining's definition and add this comment
            # to underline these facts.
            which_card_index_to_take = (random.randint(1,
                                        number_of_cards_remaining) - 1)
new_list.append(self.all_cards.pop(which_card_index_to_take))
self.all_cards = new_list
def deal_a_card(self):
if len(self.all_cards) >= 1:
return self.all_cards.pop()
# Makes viewing the state of the deck at a glance easier.
def __str__(self):
# Prints the full name of each Card in the Deck.
string_list = []
for each_card in self.all_cards:
string_list.append(each_card.proper_name)
print_string = "\n".join(string_list)
return print_string
def list(self):
return [each_card.proper_name for each_card in self.all_cards]
class Card:
def __init__(self, card_index=None):
# Support for making random Cards without a Deck:
if card_index is None:
import random
card_index = random.randint(0, 51)
# Separate from value; this is its absolute position relative to
# the ideal deck, pre-shuffle.
# It's logically equivalent to a specific pair of suit and value,
# such as the Ace of Spades or the 3 of Hearts.
self.card_index = card_index
suits = {0: "Club",
1: "Spade",
2: "Heart",
3: "Diamond"}
specials = {1: "Ace",
11: "Jack",
12: "Queen",
13: "King"}
self.suit = suits[(card_index % 4)]
if self.suit == "Club" or self.suit == "Spade":
self.color = "black"
else:
self.color = "red"
# +1 because value is 1 through 13; useful for various card games
self.value = ((card_index % 13) + 1)
if self.value in specials:
self.title = specials[self.value]
else:
self.title = str(self.value)
self.proper_name = str(self.title + " of " + self.suit + "s")
# Pretty print()ing.
def __str__(self):
return self.proper_name
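# Minimal usage sketch (added for illustration, not part of the original API):
# build a Deck, deal a few cards and report what remains.
if __name__ == '__main__':
    deck = Deck()
    for _ in range(5):
        dealt_card = deck.deal_a_card()
        print("Dealt: {0}".format(dealt_card))
    print("{0} cards left in the deck.".format(len(deck.all_cards)))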
| mit | 6,404,353,461,920,946,000 | 32.554455 | 77 | 0.576866 | false | 3.716009 | false | false | false |
danriti/raven-python | tests/transport/tests.py | 9 | 2708 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from raven.utils.testutils import TestCase
from raven.base import Client
# Some internal stuff to extend the transport layer
from raven.transport import Transport
from raven.transport.exceptions import DuplicateScheme
# Simplify comparing dicts with primitive values:
from raven.utils import json
import datetime
import calendar
import pytz
import zlib
class DummyScheme(Transport):
scheme = ['mock']
def __init__(self, parsed_url, timeout=5):
self._parsed_url = parsed_url
self.timeout = timeout
def send(self, data, headers):
"""
Sends a request to a remote webserver
"""
self._data = data
self._headers = headers
class TransportTest(TestCase):
def setUp(self):
try:
Client.register_scheme('mock', DummyScheme)
except DuplicateScheme:
pass
def test_basic_config(self):
c = Client(
dsn="mock://some_username:some_password@localhost:8143/1?timeout=1",
name="test_server"
)
assert c.remote.options == {
'timeout': '1',
}
def test_custom_transport(self):
c = Client(dsn="mock://some_username:some_password@localhost:8143/1")
data = dict(a=42, b=55, c=list(range(50)))
c.send(**data)
mock_cls = c._transport_cache['mock://some_username:some_password@localhost:8143/1'].get_transport()
expected_message = zlib.decompress(c.encode(data))
actual_message = zlib.decompress(mock_cls._data)
# These loads()/dumps() pairs order the dict keys before comparing the string.
# See GH504
self.assertEqual(
json.dumps(json.loads(expected_message.decode('utf-8')), sort_keys=True),
json.dumps(json.loads(actual_message.decode('utf-8')), sort_keys=True)
)
def test_build_then_send(self):
c = Client(
dsn="mock://some_username:some_password@localhost:8143/1",
name="test_server")
mydate = datetime.datetime(2012, 5, 4, tzinfo=pytz.utc)
d = calendar.timegm(mydate.timetuple())
msg = c.build_msg('raven.events.Message', message='foo', date=d)
expected = {
'project': '1',
'sentry.interfaces.Message': {'message': 'foo', 'params': ()},
'server_name': 'test_server',
'level': 40,
'tags': {},
'time_spent': None,
'timestamp': 1336089600,
'message': 'foo',
}
# The event_id is always overridden
del msg['event_id']
self.assertDictContainsSubset(expected, msg)
| bsd-3-clause | -5,826,906,757,774,607,000 | 28.434783 | 108 | 0.599335 | false | 3.902017 | true | false | false |
gkotton/neutron | neutron/plugins/vmware/plugins/base.py | 6 | 128952 | # Copyright 2012 VMware, Inc.
# All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import uuid
from oslo.config import cfg
from oslo.db import exception as db_exc
from oslo.utils import excutils
from sqlalchemy import exc as sql_exc
from sqlalchemy.orm import exc as sa_exc
import webob.exc
from neutron.api import extensions as neutron_extensions
from neutron.api.v2 import attributes as attr
from neutron.api.v2 import base
from neutron.common import constants
from neutron.common import exceptions as n_exc
from neutron.common import utils
from neutron import context as q_context
from neutron.db import agentschedulers_db
from neutron.db import allowedaddresspairs_db as addr_pair_db
from neutron.db import db_base_plugin_v2
from neutron.db import external_net_db
from neutron.db import extraroute_db
from neutron.db import l3_db
from neutron.db import l3_dvr_db
from neutron.db import l3_gwmode_db
from neutron.db import models_v2
from neutron.db import portbindings_db
from neutron.db import portsecurity_db
from neutron.db import quota_db # noqa
from neutron.db import securitygroups_db
from neutron.extensions import allowedaddresspairs as addr_pair
from neutron.extensions import external_net as ext_net_extn
from neutron.extensions import extraroute
from neutron.extensions import l3
from neutron.extensions import multiprovidernet as mpnet
from neutron.extensions import portbindings as pbin
from neutron.extensions import portsecurity as psec
from neutron.extensions import providernet as pnet
from neutron.extensions import securitygroup as ext_sg
from neutron.i18n import _LE, _LI, _LW
from neutron.openstack.common import lockutils
from neutron.openstack.common import log as logging
from neutron.plugins.common import constants as plugin_const
from neutron.plugins import vmware
from neutron.plugins.vmware.api_client import exception as api_exc
from neutron.plugins.vmware.common import config # noqa
from neutron.plugins.vmware.common import exceptions as nsx_exc
from neutron.plugins.vmware.common import nsx_utils
from neutron.plugins.vmware.common import securitygroups as sg_utils
from neutron.plugins.vmware.common import sync
from neutron.plugins.vmware.common import utils as c_utils
from neutron.plugins.vmware.dbexts import db as nsx_db
from neutron.plugins.vmware.dbexts import maclearning as mac_db
from neutron.plugins.vmware.dbexts import networkgw_db
from neutron.plugins.vmware.dbexts import qos_db
from neutron.plugins.vmware import dhcpmeta_modes
from neutron.plugins.vmware.extensions import maclearning as mac_ext
from neutron.plugins.vmware.extensions import networkgw
from neutron.plugins.vmware.extensions import qos
from neutron.plugins.vmware.nsxlib import l2gateway as l2gwlib
from neutron.plugins.vmware.nsxlib import queue as queuelib
from neutron.plugins.vmware.nsxlib import router as routerlib
from neutron.plugins.vmware.nsxlib import secgroup as secgrouplib
from neutron.plugins.vmware.nsxlib import switch as switchlib
LOG = logging.getLogger(__name__)
NSX_NOSNAT_RULES_ORDER = 10
NSX_FLOATINGIP_NAT_RULES_ORDER = 224
NSX_EXTGW_NAT_RULES_ORDER = 255
NSX_DEFAULT_NEXTHOP = '1.1.1.1'
class NsxPluginV2(addr_pair_db.AllowedAddressPairsMixin,
agentschedulers_db.DhcpAgentSchedulerDbMixin,
db_base_plugin_v2.NeutronDbPluginV2,
dhcpmeta_modes.DhcpMetadataAccess,
l3_dvr_db.L3_NAT_with_dvr_db_mixin,
external_net_db.External_net_db_mixin,
extraroute_db.ExtraRoute_db_mixin,
l3_gwmode_db.L3_NAT_db_mixin,
mac_db.MacLearningDbMixin,
networkgw_db.NetworkGatewayMixin,
portbindings_db.PortBindingMixin,
portsecurity_db.PortSecurityDbMixin,
qos_db.QoSDbMixin,
securitygroups_db.SecurityGroupDbMixin):
supported_extension_aliases = ["allowed-address-pairs",
"binding",
"dvr",
"ext-gw-mode",
"extraroute",
"mac-learning",
"multi-provider",
"network-gateway",
"nvp-qos",
"port-security",
"provider",
"qos-queue",
"quotas",
"external-net",
"router",
"security-group"]
__native_bulk_support = True
__native_pagination_support = True
__native_sorting_support = True
# Map nova zones to cluster for easy retrieval
novazone_cluster_map = {}
def __init__(self):
super(NsxPluginV2, self).__init__()
config.validate_config_options()
# TODO(salv-orlando): Replace These dicts with
# collections.defaultdict for better handling of default values
# Routines for managing logical ports in NSX
self.port_special_owners = [l3_db.DEVICE_OWNER_ROUTER_GW,
l3_db.DEVICE_OWNER_ROUTER_INTF]
self._port_drivers = {
'create': {l3_db.DEVICE_OWNER_ROUTER_GW:
self._nsx_create_ext_gw_port,
l3_db.DEVICE_OWNER_FLOATINGIP:
self._nsx_create_fip_port,
l3_db.DEVICE_OWNER_ROUTER_INTF:
self._nsx_create_router_port,
networkgw_db.DEVICE_OWNER_NET_GW_INTF:
self._nsx_create_l2_gw_port,
'default': self._nsx_create_port},
'delete': {l3_db.DEVICE_OWNER_ROUTER_GW:
self._nsx_delete_ext_gw_port,
l3_db.DEVICE_OWNER_ROUTER_INTF:
self._nsx_delete_router_port,
l3_db.DEVICE_OWNER_FLOATINGIP:
self._nsx_delete_fip_port,
networkgw_db.DEVICE_OWNER_NET_GW_INTF:
self._nsx_delete_port,
'default': self._nsx_delete_port}
}
neutron_extensions.append_api_extensions_path([vmware.NSX_EXT_PATH])
self.nsx_opts = cfg.CONF.NSX
self.nsx_sync_opts = cfg.CONF.NSX_SYNC
self.cluster = nsx_utils.create_nsx_cluster(
cfg.CONF,
self.nsx_opts.concurrent_connections,
self.nsx_opts.nsx_gen_timeout)
self.base_binding_dict = {
pbin.VIF_TYPE: pbin.VIF_TYPE_OVS,
pbin.VIF_DETAILS: {
# TODO(rkukura): Replace with new VIF security details
pbin.CAP_PORT_FILTER:
'security-group' in self.supported_extension_aliases}}
self._extend_fault_map()
self.setup_dhcpmeta_access()
# Set this flag to false as the default gateway has not
# been yet updated from the config file
self._is_default_net_gw_in_sync = False
# Create a synchronizer instance for backend sync
self._synchronizer = sync.NsxSynchronizer(
self.safe_reference, self.cluster,
self.nsx_sync_opts.state_sync_interval,
self.nsx_sync_opts.min_sync_req_delay,
self.nsx_sync_opts.min_chunk_size,
self.nsx_sync_opts.max_random_sync_delay)
def _ensure_default_network_gateway(self):
if self._is_default_net_gw_in_sync:
return
# Add the gw in the db as default, and unset any previous default
def_l2_gw_uuid = self.cluster.default_l2_gw_service_uuid
try:
ctx = q_context.get_admin_context()
self._unset_default_network_gateways(ctx)
if not def_l2_gw_uuid:
return
try:
def_network_gw = self._get_network_gateway(ctx,
def_l2_gw_uuid)
except networkgw_db.GatewayNotFound:
# Create in DB only - don't go to backend
def_gw_data = {'id': def_l2_gw_uuid,
'name': 'default L2 gateway service',
'devices': []}
gw_res_name = networkgw.GATEWAY_RESOURCE_NAME.replace('-', '_')
def_network_gw = super(
NsxPluginV2, self).create_network_gateway(
ctx, {gw_res_name: def_gw_data})
# In any case set is as default
self._set_default_network_gateway(ctx, def_network_gw['id'])
# Ensure this method is executed only once
self._is_default_net_gw_in_sync = True
except Exception:
with excutils.save_and_reraise_exception():
LOG.exception(_LE("Unable to process default l2 gw service: "
"%s"),
def_l2_gw_uuid)
def _build_ip_address_list(self, context, fixed_ips, subnet_ids=None):
"""Build ip_addresses data structure for logical router port.
No need to perform validation on IPs - this has already been
done in the l3_db mixin class.
"""
ip_addresses = []
for ip in fixed_ips:
if not subnet_ids or (ip['subnet_id'] in subnet_ids):
subnet = self._get_subnet(context, ip['subnet_id'])
ip_prefix = '%s/%s' % (ip['ip_address'],
subnet['cidr'].split('/')[1])
ip_addresses.append(ip_prefix)
return ip_addresses
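    # Illustrative example (added note, not from the original code): for a
    # fixed_ips entry {'subnet_id': <subnet>, 'ip_address': '10.0.0.1'} whose
    # subnet CIDR is '10.0.0.0/24', the method yields ['10.0.0.1/24'].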
def _create_and_attach_router_port(self, cluster, context,
nsx_router_id, port_data,
attachment_type, attachment,
attachment_vlan=None,
subnet_ids=None):
# Use a fake IP address if gateway port is not 'real'
ip_addresses = (port_data.get('fake_ext_gw') and
['0.0.0.0/31'] or
self._build_ip_address_list(context,
port_data['fixed_ips'],
subnet_ids))
try:
lrouter_port = routerlib.create_router_lport(
cluster, nsx_router_id, port_data.get('tenant_id', 'fake'),
port_data.get('id', 'fake'), port_data.get('name', 'fake'),
port_data.get('admin_state_up', True), ip_addresses,
port_data.get('mac_address'))
LOG.debug("Created NSX router port:%s", lrouter_port['uuid'])
except api_exc.NsxApiException:
LOG.exception(_LE("Unable to create port on NSX logical router "
"%s"),
nsx_router_id)
raise nsx_exc.NsxPluginException(
err_msg=_("Unable to create logical router port for neutron "
"port id %(port_id)s on router %(nsx_router_id)s") %
{'port_id': port_data.get('id'),
'nsx_router_id': nsx_router_id})
self._update_router_port_attachment(cluster, context, nsx_router_id,
port_data, lrouter_port['uuid'],
attachment_type, attachment,
attachment_vlan)
return lrouter_port
def _update_router_gw_info(self, context, router_id, info):
# NOTE(salvatore-orlando): We need to worry about rollback of NSX
# configuration in case of failures in the process
# Ref. LP bug 1102301
router = self._get_router(context, router_id)
# Check whether SNAT rule update should be triggered
# NSX also supports multiple external networks so there is also
# the possibility that NAT rules should be replaced
current_ext_net_id = router.gw_port_id and router.gw_port.network_id
new_ext_net_id = info and info.get('network_id')
# SNAT should be enabled unless info['enable_snat'] is
# explicitly set to false
enable_snat = new_ext_net_id and info.get('enable_snat', True)
# Remove if ext net removed, changed, or if snat disabled
remove_snat_rules = (current_ext_net_id and
new_ext_net_id != current_ext_net_id or
router.enable_snat and not enable_snat)
# Add rules if snat is enabled, and if either the external network
# changed or snat was previously disabled
# NOTE: enable_snat == True implies new_ext_net_id != None
add_snat_rules = (enable_snat and
(new_ext_net_id != current_ext_net_id or
not router.enable_snat))
router = super(NsxPluginV2, self)._update_router_gw_info(
context, router_id, info, router=router)
# Add/Remove SNAT rules as needed
# Create an elevated context for dealing with metadata access
# cidrs which are created within admin context
ctx_elevated = context.elevated()
if remove_snat_rules or add_snat_rules:
cidrs = self._find_router_subnets_cidrs(ctx_elevated, router_id)
nsx_router_id = nsx_utils.get_nsx_router_id(
context.session, self.cluster, router_id)
if remove_snat_rules:
# Be safe and concede NAT rules might not exist.
# Therefore use min_num_expected=0
for cidr in cidrs:
routerlib.delete_nat_rules_by_match(
self.cluster, nsx_router_id, "SourceNatRule",
max_num_expected=1, min_num_expected=0,
raise_on_len_mismatch=False,
source_ip_addresses=cidr)
if add_snat_rules:
ip_addresses = self._build_ip_address_list(
ctx_elevated, router.gw_port['fixed_ips'])
# Set the SNAT rule for each subnet (only first IP)
for cidr in cidrs:
cidr_prefix = int(cidr.split('/')[1])
routerlib.create_lrouter_snat_rule(
self.cluster, nsx_router_id,
ip_addresses[0].split('/')[0],
ip_addresses[0].split('/')[0],
order=NSX_EXTGW_NAT_RULES_ORDER - cidr_prefix,
match_criteria={'source_ip_addresses': cidr})
def _update_router_port_attachment(self, cluster, context,
nsx_router_id, port_data,
nsx_router_port_id,
attachment_type,
attachment,
attachment_vlan=None):
if not nsx_router_port_id:
nsx_router_port_id = self._find_router_gw_port(context, port_data)
try:
routerlib.plug_router_port_attachment(cluster, nsx_router_id,
nsx_router_port_id,
attachment,
attachment_type,
attachment_vlan)
LOG.debug("Attached %(att)s to NSX router port %(port)s",
{'att': attachment, 'port': nsx_router_port_id})
except api_exc.NsxApiException:
# Must remove NSX logical port
routerlib.delete_router_lport(cluster, nsx_router_id,
nsx_router_port_id)
LOG.exception(_LE("Unable to plug attachment in NSX logical "
"router port %(r_port_id)s, associated with "
"Neutron %(q_port_id)s"),
{'r_port_id': nsx_router_port_id,
'q_port_id': port_data.get('id')})
raise nsx_exc.NsxPluginException(
err_msg=(_("Unable to plug attachment in router port "
"%(r_port_id)s for neutron port id %(q_port_id)s "
"on router %(router_id)s") %
{'r_port_id': nsx_router_port_id,
'q_port_id': port_data.get('id'),
'router_id': nsx_router_id}))
def _get_port_by_device_id(self, context, device_id, device_owner):
"""Retrieve ports associated with a specific device id.
Used for retrieving all neutron ports attached to a given router.
"""
port_qry = context.session.query(models_v2.Port)
return port_qry.filter_by(
device_id=device_id,
device_owner=device_owner,).all()
def _find_router_subnets_cidrs(self, context, router_id):
"""Retrieve subnets attached to the specified router."""
ports = self._get_port_by_device_id(context, router_id,
l3_db.DEVICE_OWNER_ROUTER_INTF)
# No need to check for overlapping CIDRs
cidrs = []
for port in ports:
for ip in port.get('fixed_ips', []):
cidrs.append(self._get_subnet(context,
ip.subnet_id).cidr)
return cidrs
def _nsx_find_lswitch_for_port(self, context, port_data):
network = self._get_network(context, port_data['network_id'])
network_bindings = nsx_db.get_network_bindings(
context.session, port_data['network_id'])
max_ports = self.nsx_opts.max_lp_per_overlay_ls
allow_extra_lswitches = False
for network_binding in network_bindings:
if network_binding.binding_type in (c_utils.NetworkTypes.FLAT,
c_utils.NetworkTypes.VLAN):
max_ports = self.nsx_opts.max_lp_per_bridged_ls
allow_extra_lswitches = True
break
try:
return self._handle_lswitch_selection(
context, self.cluster, network, network_bindings,
max_ports, allow_extra_lswitches)
except api_exc.NsxApiException:
err_desc = _("An exception occurred while selecting logical "
"switch for the port")
LOG.exception(err_desc)
raise nsx_exc.NsxPluginException(err_msg=err_desc)
def _nsx_create_port_helper(self, session, ls_uuid, port_data,
do_port_security=True):
# Convert Neutron security groups identifiers into NSX security
# profiles identifiers
nsx_sec_profile_ids = [
nsx_utils.get_nsx_security_group_id(
session, self.cluster, neutron_sg_id) for
neutron_sg_id in (port_data[ext_sg.SECURITYGROUPS] or [])]
return switchlib.create_lport(self.cluster,
ls_uuid,
port_data['tenant_id'],
port_data['id'],
port_data['name'],
port_data['device_id'],
port_data['admin_state_up'],
port_data['mac_address'],
port_data['fixed_ips'],
port_data[psec.PORTSECURITY],
nsx_sec_profile_ids,
port_data.get(qos.QUEUE),
port_data.get(mac_ext.MAC_LEARNING),
port_data.get(addr_pair.ADDRESS_PAIRS))
def _handle_create_port_exception(self, context, port_id,
ls_uuid, lp_uuid):
with excutils.save_and_reraise_exception():
# rollback nsx logical port only if it was successfully
# created on NSX. Should this command fail the original
# exception will be raised.
if lp_uuid:
# Remove orphaned port from NSX
switchlib.delete_port(self.cluster, ls_uuid, lp_uuid)
# rollback the neutron-nsx port mapping
nsx_db.delete_neutron_nsx_port_mapping(context.session,
port_id)
LOG.exception(_LE("An exception occurred while creating the "
"neutron port %s on the NSX plaform"), port_id)
def _nsx_create_port(self, context, port_data):
"""Driver for creating a logical switch port on NSX platform."""
# FIXME(salvatore-orlando): On the NSX platform we do not really have
# external networks. So if as user tries and create a "regular" VIF
# port on an external network we are unable to actually create.
# However, in order to not break unit tests, we need to still create
# the DB object and return success
if self._network_is_external(context, port_data['network_id']):
LOG.info(_LI("NSX plugin does not support regular VIF ports on "
"external networks. Port %s will be down."),
port_data['network_id'])
# No need to actually update the DB state - the default is down
return port_data
lport = None
selected_lswitch = None
try:
selected_lswitch = self._nsx_find_lswitch_for_port(context,
port_data)
lport = self._nsx_create_port_helper(context.session,
selected_lswitch['uuid'],
port_data,
True)
nsx_db.add_neutron_nsx_port_mapping(
context.session, port_data['id'],
selected_lswitch['uuid'], lport['uuid'])
if port_data['device_owner'] not in self.port_special_owners:
switchlib.plug_vif_interface(
self.cluster, selected_lswitch['uuid'],
lport['uuid'], "VifAttachment", port_data['id'])
LOG.debug("_nsx_create_port completed for port %(name)s "
"on network %(network_id)s. The new port id is "
"%(id)s.", port_data)
except (api_exc.NsxApiException, n_exc.NeutronException):
self._handle_create_port_exception(
context, port_data['id'],
selected_lswitch and selected_lswitch['uuid'],
lport and lport['uuid'])
except db_exc.DBError as e:
if (port_data['device_owner'] == constants.DEVICE_OWNER_DHCP and
isinstance(e.inner_exception, sql_exc.IntegrityError)):
LOG.warning(
_LW("Concurrent network deletion detected; Back-end "
"Port %(nsx_id)s creation to be rolled back for "
"Neutron port: %(neutron_id)s"),
{'nsx_id': lport['uuid'],
'neutron_id': port_data['id']})
if selected_lswitch and lport:
try:
switchlib.delete_port(self.cluster,
selected_lswitch['uuid'],
lport['uuid'])
except n_exc.NotFound:
LOG.debug("NSX Port %s already gone", lport['uuid'])
def _nsx_delete_port(self, context, port_data):
# FIXME(salvatore-orlando): On the NSX platform we do not really have
# external networks. So deleting regular ports from external networks
# does not make sense. However we cannot raise as this would break
# unit tests.
if self._network_is_external(context, port_data['network_id']):
LOG.info(_LI("NSX plugin does not support regular VIF ports on "
"external networks. Port %s will be down."),
port_data['network_id'])
return
nsx_switch_id, nsx_port_id = nsx_utils.get_nsx_switch_and_port_id(
context.session, self.cluster, port_data['id'])
if not nsx_port_id:
LOG.debug("Port '%s' was already deleted on NSX platform", id)
return
# TODO(bgh): if this is a bridged network and the lswitch we just got
# back will have zero ports after the delete we should garbage collect
# the lswitch.
try:
switchlib.delete_port(self.cluster, nsx_switch_id, nsx_port_id)
LOG.debug("_nsx_delete_port completed for port %(port_id)s "
"on network %(net_id)s",
{'port_id': port_data['id'],
'net_id': port_data['network_id']})
except n_exc.NotFound:
LOG.warning(_LW("Port %s not found in NSX"), port_data['id'])
def _nsx_delete_router_port(self, context, port_data):
# Delete logical router port
nsx_router_id = nsx_utils.get_nsx_router_id(
context.session, self.cluster, port_data['device_id'])
nsx_switch_id, nsx_port_id = nsx_utils.get_nsx_switch_and_port_id(
context.session, self.cluster, port_data['id'])
if not nsx_port_id:
LOG.warn(
_LW("Neutron port %(port_id)s not found on NSX backend. "
"Terminating delete operation. A dangling router port "
"might have been left on router %(router_id)s"),
{'port_id': port_data['id'],
'router_id': nsx_router_id})
return
try:
routerlib.delete_peer_router_lport(self.cluster,
nsx_router_id,
nsx_switch_id,
nsx_port_id)
except api_exc.NsxApiException:
# Do not raise because the issue might as well be that the
# router has already been deleted, so there would be nothing
# to do here
LOG.exception(_LE("Ignoring exception as this means the peer "
"for port '%s' has already been deleted."),
nsx_port_id)
# Delete logical switch port
self._nsx_delete_port(context, port_data)
def _nsx_create_router_port(self, context, port_data):
"""Driver for creating a switch port to be connected to a router."""
# No router ports on external networks!
if self._network_is_external(context, port_data['network_id']):
raise nsx_exc.NsxPluginException(
err_msg=(_("It is not allowed to create router interface "
"ports on external networks as '%s'") %
port_data['network_id']))
ls_port = None
selected_lswitch = None
try:
selected_lswitch = self._nsx_find_lswitch_for_port(
context, port_data)
# Do not apply port security here!
ls_port = self._nsx_create_port_helper(
context.session, selected_lswitch['uuid'],
port_data, False)
# Assuming subnet being attached is on first fixed ip
# element in port data
subnet_id = port_data['fixed_ips'][0]['subnet_id']
nsx_router_id = nsx_utils.get_nsx_router_id(
context.session, self.cluster, port_data['device_id'])
# Create peer port on logical router
self._create_and_attach_router_port(
self.cluster, context, nsx_router_id, port_data,
"PatchAttachment", ls_port['uuid'],
subnet_ids=[subnet_id])
nsx_db.add_neutron_nsx_port_mapping(
context.session, port_data['id'],
selected_lswitch['uuid'], ls_port['uuid'])
LOG.debug("_nsx_create_router_port completed for port "
"%(name)s on network %(network_id)s. The new "
"port id is %(id)s.",
port_data)
except (api_exc.NsxApiException, n_exc.NeutronException):
self._handle_create_port_exception(
context, port_data['id'],
selected_lswitch and selected_lswitch['uuid'],
ls_port and ls_port['uuid'])
def _find_router_gw_port(self, context, port_data):
router_id = port_data['device_id']
if not router_id:
raise n_exc.BadRequest(_("device_id field must be populated in "
"order to create an external gateway "
"port for network %s"),
port_data['network_id'])
nsx_router_id = nsx_utils.get_nsx_router_id(
context.session, self.cluster, router_id)
lr_port = routerlib.find_router_gw_port(context, self.cluster,
nsx_router_id)
if not lr_port:
raise nsx_exc.NsxPluginException(
err_msg=(_("The gateway port for the NSX router %s "
"was not found on the backend")
% nsx_router_id))
return lr_port
@lockutils.synchronized('vmware', 'neutron-')
def _nsx_create_ext_gw_port(self, context, port_data):
"""Driver for creating an external gateway port on NSX platform."""
# TODO(salvatore-orlando): Handle NSX resource
# rollback when something goes not quite as expected
lr_port = self._find_router_gw_port(context, port_data)
ip_addresses = self._build_ip_address_list(context,
port_data['fixed_ips'])
# This operation actually always updates a NSX logical port
# instead of creating one. This is because the gateway port
# is created at the same time as the NSX logical router, otherwise
# the fabric status of the NSX router will be down.
# admin_status should always be up for the gateway port
# regardless of what the user specifies in neutron
nsx_router_id = nsx_utils.get_nsx_router_id(
context.session, self.cluster, port_data['device_id'])
routerlib.update_router_lport(self.cluster,
nsx_router_id,
lr_port['uuid'],
port_data['tenant_id'],
port_data['id'],
port_data['name'],
True,
ip_addresses)
ext_network = self.get_network(context, port_data['network_id'])
if ext_network.get(pnet.NETWORK_TYPE) == c_utils.NetworkTypes.L3_EXT:
# Update attachment
physical_network = (ext_network[pnet.PHYSICAL_NETWORK] or
self.cluster.default_l3_gw_service_uuid)
self._update_router_port_attachment(
self.cluster, context, nsx_router_id, port_data,
lr_port['uuid'],
"L3GatewayAttachment",
physical_network,
ext_network[pnet.SEGMENTATION_ID])
LOG.debug("_nsx_create_ext_gw_port completed on external network "
"%(ext_net_id)s, attached to router:%(router_id)s. "
"NSX port id is %(nsx_port_id)s",
{'ext_net_id': port_data['network_id'],
'router_id': nsx_router_id,
'nsx_port_id': lr_port['uuid']})
@lockutils.synchronized('vmware', 'neutron-')
def _nsx_delete_ext_gw_port(self, context, port_data):
lr_port = self._find_router_gw_port(context, port_data)
# TODO(salvatore-orlando): Handle NSX resource
# rollback when something goes not quite as expected
try:
# Delete is actually never a real delete, otherwise the NSX
# logical router will stop working
router_id = port_data['device_id']
nsx_router_id = nsx_utils.get_nsx_router_id(
context.session, self.cluster, router_id)
routerlib.update_router_lport(self.cluster,
nsx_router_id,
lr_port['uuid'],
port_data['tenant_id'],
port_data['id'],
port_data['name'],
True,
['0.0.0.0/31'])
# Reset attachment
self._update_router_port_attachment(
self.cluster, context, nsx_router_id, port_data,
lr_port['uuid'],
"L3GatewayAttachment",
self.cluster.default_l3_gw_service_uuid)
except api_exc.ResourceNotFound:
raise nsx_exc.NsxPluginException(
err_msg=_("Logical router resource %s not found "
"on NSX platform") % router_id)
except api_exc.NsxApiException:
raise nsx_exc.NsxPluginException(
err_msg=_("Unable to update logical router"
"on NSX Platform"))
LOG.debug("_nsx_delete_ext_gw_port completed on external network "
"%(ext_net_id)s, attached to NSX router:%(router_id)s",
{'ext_net_id': port_data['network_id'],
'router_id': nsx_router_id})
def _nsx_create_l2_gw_port(self, context, port_data):
"""Create a switch port, and attach it to a L2 gateway attachment."""
# FIXME(salvatore-orlando): On the NSX platform we do not really have
# external networks. So if as user tries and create a "regular" VIF
# port on an external network we are unable to actually create.
# However, in order to not break unit tests, we need to still create
# the DB object and return success
if self._network_is_external(context, port_data['network_id']):
LOG.info(_LI("NSX plugin does not support regular VIF ports on "
"external networks. Port %s will be down."),
port_data['network_id'])
# No need to actually update the DB state - the default is down
return port_data
lport = None
try:
selected_lswitch = self._nsx_find_lswitch_for_port(
context, port_data)
lport = self._nsx_create_port_helper(
context.session,
selected_lswitch['uuid'],
port_data,
True)
nsx_db.add_neutron_nsx_port_mapping(
context.session, port_data['id'],
selected_lswitch['uuid'], lport['uuid'])
l2gwlib.plug_l2_gw_service(
self.cluster,
selected_lswitch['uuid'],
lport['uuid'],
port_data['device_id'],
int(port_data.get('gw:segmentation_id') or 0))
except Exception:
with excutils.save_and_reraise_exception():
if lport:
switchlib.delete_port(self.cluster,
selected_lswitch['uuid'],
lport['uuid'])
LOG.debug("_nsx_create_l2_gw_port completed for port %(name)s "
"on network %(network_id)s. The new port id "
"is %(id)s.", port_data)
def _nsx_create_fip_port(self, context, port_data):
# As we do not create ports for floating IPs in NSX,
# this is a no-op driver
pass
def _nsx_delete_fip_port(self, context, port_data):
# As we do not create ports for floating IPs in NSX,
# this is a no-op driver
pass
def _extend_fault_map(self):
"""Extends the Neutron Fault Map.
Exceptions specific to the NSX Plugin are mapped to standard
HTTP Exceptions.
"""
base.FAULT_MAP.update({nsx_exc.InvalidNovaZone:
webob.exc.HTTPBadRequest,
nsx_exc.NoMorePortsException:
webob.exc.HTTPBadRequest,
nsx_exc.MaintenanceInProgress:
webob.exc.HTTPServiceUnavailable,
nsx_exc.InvalidSecurityCertificate:
webob.exc.HTTPBadRequest})
def _validate_provider_create(self, context, network):
segments = network.get(mpnet.SEGMENTS)
if not attr.is_attr_set(segments):
return
mpnet.check_duplicate_segments(segments)
for segment in segments:
network_type = segment.get(pnet.NETWORK_TYPE)
physical_network = segment.get(pnet.PHYSICAL_NETWORK)
physical_network_set = attr.is_attr_set(physical_network)
segmentation_id = segment.get(pnet.SEGMENTATION_ID)
network_type_set = attr.is_attr_set(network_type)
segmentation_id_set = attr.is_attr_set(segmentation_id)
# If the physical_network_uuid isn't passed in use the default one.
if not physical_network_set:
physical_network = cfg.CONF.default_tz_uuid
err_msg = None
if not network_type_set:
err_msg = _("%s required") % pnet.NETWORK_TYPE
elif network_type in (c_utils.NetworkTypes.GRE,
c_utils.NetworkTypes.STT,
c_utils.NetworkTypes.FLAT):
if segmentation_id_set:
err_msg = _("Segmentation ID cannot be specified with "
"flat network type")
elif network_type == c_utils.NetworkTypes.VLAN:
if not segmentation_id_set:
err_msg = _("Segmentation ID must be specified with "
"vlan network type")
elif (segmentation_id_set and
not utils.is_valid_vlan_tag(segmentation_id)):
err_msg = (_("%(segmentation_id)s out of range "
"(%(min_id)s through %(max_id)s)") %
{'segmentation_id': segmentation_id,
'min_id': constants.MIN_VLAN_TAG,
'max_id': constants.MAX_VLAN_TAG})
else:
# Verify segment is not already allocated
bindings = (
nsx_db.get_network_bindings_by_vlanid_and_physical_net(
context.session, segmentation_id,
physical_network)
)
if bindings:
raise n_exc.VlanIdInUse(
vlan_id=segmentation_id,
physical_network=physical_network)
elif network_type == c_utils.NetworkTypes.L3_EXT:
if (segmentation_id_set and
not utils.is_valid_vlan_tag(segmentation_id)):
err_msg = (_("%(segmentation_id)s out of range "
"(%(min_id)s through %(max_id)s)") %
{'segmentation_id': segmentation_id,
'min_id': constants.MIN_VLAN_TAG,
'max_id': constants.MAX_VLAN_TAG})
else:
err_msg = (_("%(net_type_param)s %(net_type_value)s not "
"supported") %
{'net_type_param': pnet.NETWORK_TYPE,
'net_type_value': network_type})
if err_msg:
raise n_exc.InvalidInput(error_message=err_msg)
        # TODO(salvatore-orlando): Validate transport zone uuid
# which should be specified in physical_network
def _extend_network_dict_provider(self, context, network,
multiprovider=None, bindings=None):
if not bindings:
bindings = nsx_db.get_network_bindings(context.session,
network['id'])
if not multiprovider:
multiprovider = nsx_db.is_multiprovider_network(context.session,
network['id'])
# With NSX plugin 'normal' overlay networks will have no binding
# TODO(salvatore-orlando) make sure users can specify a distinct
# phy_uuid as 'provider network' for STT net type
if bindings:
if not multiprovider:
# network came in through provider networks api
network[pnet.NETWORK_TYPE] = bindings[0].binding_type
network[pnet.PHYSICAL_NETWORK] = bindings[0].phy_uuid
network[pnet.SEGMENTATION_ID] = bindings[0].vlan_id
else:
# network come in though multiprovider networks api
network[mpnet.SEGMENTS] = [
{pnet.NETWORK_TYPE: binding.binding_type,
pnet.PHYSICAL_NETWORK: binding.phy_uuid,
pnet.SEGMENTATION_ID: binding.vlan_id}
for binding in bindings]
def extend_port_dict_binding(self, port_res, port_db):
super(NsxPluginV2, self).extend_port_dict_binding(port_res, port_db)
port_res[pbin.VNIC_TYPE] = pbin.VNIC_NORMAL
def _handle_lswitch_selection(self, context, cluster, network,
network_bindings, max_ports,
allow_extra_lswitches):
lswitches = nsx_utils.fetch_nsx_switches(
context.session, cluster, network.id)
try:
return [ls for ls in lswitches
if (ls['_relations']['LogicalSwitchStatus']
['lport_count'] < max_ports)].pop(0)
except IndexError:
# Too bad, no switch available
LOG.debug("No switch has available ports (%d checked)",
len(lswitches))
if allow_extra_lswitches:
# The 'main' logical switch is either the only one available
# or the one where the 'multi_lswitch' tag was set
while lswitches:
main_ls = lswitches.pop(0)
tag_dict = dict((x['scope'], x['tag'])
for x in main_ls['tags'])
if 'multi_lswitch' in tag_dict:
break
else:
# by construction this statement is hit if there is only one
# logical switch and the multi_lswitch tag has not been set.
# The tag must therefore be added.
tags = main_ls['tags']
tags.append({'tag': 'True', 'scope': 'multi_lswitch'})
switchlib.update_lswitch(cluster,
main_ls['uuid'],
main_ls['display_name'],
network['tenant_id'],
tags=tags)
transport_zone_config = self._convert_to_nsx_transport_zones(
cluster, network, bindings=network_bindings)
selected_lswitch = switchlib.create_lswitch(
cluster, network.id, network.tenant_id,
"%s-ext-%s" % (network.name, len(lswitches)),
transport_zone_config)
# add a mapping between the neutron network and the newly
# created logical switch
nsx_db.add_neutron_nsx_network_mapping(
context.session, network.id, selected_lswitch['uuid'])
return selected_lswitch
else:
LOG.error(_LE("Maximum number of logical ports reached for "
"logical network %s"), network.id)
raise nsx_exc.NoMorePortsException(network=network.id)
def _convert_to_nsx_transport_zones(self, cluster, network=None,
bindings=None):
# TODO(salv-orlando): Remove this method and call nsx-utils direct
return nsx_utils.convert_to_nsx_transport_zones(
cluster.default_tz_uuid, network, bindings,
default_transport_type=cfg.CONF.NSX.default_transport_type)
def _convert_to_transport_zones_dict(self, network):
"""Converts the provider request body to multiprovider.
        Returns: True if the request is multiprovider, False if provider,
        and None if neither.
"""
if any(attr.is_attr_set(network.get(f))
for f in (pnet.NETWORK_TYPE, pnet.PHYSICAL_NETWORK,
pnet.SEGMENTATION_ID)):
if attr.is_attr_set(network.get(mpnet.SEGMENTS)):
raise mpnet.SegmentsSetInConjunctionWithProviders()
# convert to transport zone list
network[mpnet.SEGMENTS] = [
{pnet.NETWORK_TYPE: network[pnet.NETWORK_TYPE],
pnet.PHYSICAL_NETWORK: network[pnet.PHYSICAL_NETWORK],
pnet.SEGMENTATION_ID: network[pnet.SEGMENTATION_ID]}]
del network[pnet.NETWORK_TYPE]
del network[pnet.PHYSICAL_NETWORK]
del network[pnet.SEGMENTATION_ID]
return False
        if attr.is_attr_set(network.get(mpnet.SEGMENTS)):
return True
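    # Illustrative example (added note, not from the original code): a request
    # body carrying provider:network_type='vlan', provider:physical_network
    # and provider:segmentation_id=42 is rewritten in place into a single
    # 'segments' entry and the method returns False; a body that only sets
    # 'segments' returns True.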
def create_network(self, context, network):
net_data = network['network']
tenant_id = self._get_tenant_id_for_create(context, net_data)
self._ensure_default_security_group(context, tenant_id)
# Process the provider network extension
provider_type = self._convert_to_transport_zones_dict(net_data)
self._validate_provider_create(context, net_data)
# Replace ATTR_NOT_SPECIFIED with None before sending to NSX
for key, value in network['network'].iteritems():
if value is attr.ATTR_NOT_SPECIFIED:
net_data[key] = None
# FIXME(arosen) implement admin_state_up = False in NSX
if net_data['admin_state_up'] is False:
LOG.warning(_LW("Network with admin_state_up=False are not yet "
"supported by this plugin. Ignoring setting for "
"network %s"), net_data.get('name', '<unknown>'))
transport_zone_config = self._convert_to_nsx_transport_zones(
self.cluster, net_data)
external = net_data.get(ext_net_extn.EXTERNAL)
# NOTE(salv-orlando): Pre-generating uuid for Neutron
# network. This will be removed once the network create operation
# becomes an asynchronous task
net_data['id'] = str(uuid.uuid4())
if (not attr.is_attr_set(external) or
attr.is_attr_set(external) and not external):
lswitch = switchlib.create_lswitch(
self.cluster, net_data['id'],
tenant_id, net_data.get('name'),
transport_zone_config,
shared=net_data.get(attr.SHARED))
with context.session.begin(subtransactions=True):
new_net = super(NsxPluginV2, self).create_network(context,
network)
# Process port security extension
self._process_network_port_security_create(
context, net_data, new_net)
# DB Operations for setting the network as external
self._process_l3_create(context, new_net, net_data)
# Process QoS queue extension
net_queue_id = net_data.get(qos.QUEUE)
if net_queue_id:
# Raises if not found
self.get_qos_queue(context, net_queue_id)
self._process_network_queue_mapping(
context, new_net, net_queue_id)
# Add mapping between neutron network and NSX switch
if (not attr.is_attr_set(external) or
attr.is_attr_set(external) and not external):
nsx_db.add_neutron_nsx_network_mapping(
context.session, new_net['id'],
lswitch['uuid'])
if (net_data.get(mpnet.SEGMENTS) and
isinstance(provider_type, bool)):
net_bindings = []
for tz in net_data[mpnet.SEGMENTS]:
segmentation_id = tz.get(pnet.SEGMENTATION_ID, 0)
segmentation_id_set = attr.is_attr_set(segmentation_id)
if not segmentation_id_set:
segmentation_id = 0
net_bindings.append(nsx_db.add_network_binding(
context.session, new_net['id'],
tz.get(pnet.NETWORK_TYPE),
tz.get(pnet.PHYSICAL_NETWORK),
segmentation_id))
if provider_type:
nsx_db.set_multiprovider_network(context.session,
new_net['id'])
self._extend_network_dict_provider(context, new_net,
provider_type,
net_bindings)
self.handle_network_dhcp_access(context, new_net,
action='create_network')
return new_net
def delete_network(self, context, id):
external = self._network_is_external(context, id)
# Before removing entry from Neutron DB, retrieve NSX switch
# identifiers for removing them from backend
if not external:
lswitch_ids = nsx_utils.get_nsx_switch_ids(
context.session, self.cluster, id)
with context.session.begin(subtransactions=True):
self._process_l3_delete(context, id)
nsx_db.delete_network_bindings(context.session, id)
super(NsxPluginV2, self).delete_network(context, id)
# Do not go to NSX for external networks
if not external:
try:
switchlib.delete_networks(self.cluster, id, lswitch_ids)
except n_exc.NotFound:
LOG.warning(_LW("The following logical switches were not "
"found on the NSX backend:%s"), lswitch_ids)
self.handle_network_dhcp_access(context, id, action='delete_network')
LOG.debug("Delete network complete for network: %s", id)
def get_network(self, context, id, fields=None):
with context.session.begin(subtransactions=True):
            # go to the plugin DB and fetch the network
network = self._get_network(context, id)
if (self.nsx_sync_opts.always_read_status or
fields and 'status' in fields):
# External networks are not backed by nsx lswitches
if not network.external:
# Perform explicit state synchronization
self._synchronizer.synchronize_network(context, network)
# Don't do field selection here otherwise we won't be able
# to add provider networks fields
net_result = self._make_network_dict(network)
self._extend_network_dict_provider(context, net_result)
return self._fields(net_result, fields)
def get_networks(self, context, filters=None, fields=None,
sorts=None, limit=None, marker=None,
page_reverse=False):
filters = filters or {}
with context.session.begin(subtransactions=True):
networks = (
super(NsxPluginV2, self).get_networks(
context, filters, fields, sorts,
limit, marker, page_reverse))
for net in networks:
self._extend_network_dict_provider(context, net)
return [self._fields(network, fields) for network in networks]
def update_network(self, context, id, network):
pnet._raise_if_updates_provider_attributes(network['network'])
if network["network"].get("admin_state_up") is False:
raise NotImplementedError(_("admin_state_up=False networks "
"are not supported."))
with context.session.begin(subtransactions=True):
net = super(NsxPluginV2, self).update_network(context, id, network)
if psec.PORTSECURITY in network['network']:
self._process_network_port_security_update(
context, network['network'], net)
net_queue_id = network['network'].get(qos.QUEUE)
if net_queue_id:
self._delete_network_queue_mapping(context, id)
self._process_network_queue_mapping(context, net, net_queue_id)
self._process_l3_update(context, net, network['network'])
self._extend_network_dict_provider(context, net)
# If provided, update port name on backend; treat backend failures as
# not critical (log error, but do not raise)
if 'name' in network['network']:
# in case of chained switches update name only for the first one
nsx_switch_ids = nsx_utils.get_nsx_switch_ids(
context.session, self.cluster, id)
if not nsx_switch_ids or len(nsx_switch_ids) < 1:
LOG.warn(_LW("Unable to find NSX mappings for neutron "
"network:%s"), id)
try:
switchlib.update_lswitch(self.cluster,
nsx_switch_ids[0],
network['network']['name'])
except api_exc.NsxApiException as e:
LOG.warn(_LW("Logical switch update on NSX backend failed. "
"Neutron network id:%(net_id)s; "
"NSX lswitch id:%(lswitch_id)s;"
"Error:%(error)s"),
{'net_id': id, 'lswitch_id': nsx_switch_ids[0],
'error': e})
return net
def create_port(self, context, port):
# If PORTSECURITY is not the default value ATTR_NOT_SPECIFIED
# then we pass the port to the policy engine. The reason why we don't
# pass the value to the policy engine when the port is
# ATTR_NOT_SPECIFIED is for the case where a port is created on a
# shared network that is not owned by the tenant.
port_data = port['port']
# Set port status as 'DOWN'. This will be updated by backend sync.
port_data['status'] = constants.PORT_STATUS_DOWN
with context.session.begin(subtransactions=True):
# First we allocate port in neutron database
neutron_db = super(NsxPluginV2, self).create_port(context, port)
neutron_port_id = neutron_db['id']
# Update fields obtained from neutron db (eg: MAC address)
port["port"].update(neutron_db)
self.handle_port_metadata_access(context, neutron_db)
# port security extension checks
(port_security, has_ip) = self._determine_port_security_and_has_ip(
context, port_data)
port_data[psec.PORTSECURITY] = port_security
self._process_port_port_security_create(
context, port_data, neutron_db)
# allowed address pair checks
if attr.is_attr_set(port_data.get(addr_pair.ADDRESS_PAIRS)):
if not port_security:
raise addr_pair.AddressPairAndPortSecurityRequired()
else:
self._process_create_allowed_address_pairs(
context, neutron_db,
port_data[addr_pair.ADDRESS_PAIRS])
else:
# remove ATTR_NOT_SPECIFIED
port_data[addr_pair.ADDRESS_PAIRS] = []
# security group extension checks
if port_security and has_ip:
self._ensure_default_security_group_on_port(context, port)
elif attr.is_attr_set(port_data.get(ext_sg.SECURITYGROUPS)):
raise psec.PortSecurityAndIPRequiredForSecurityGroups()
port_data[ext_sg.SECURITYGROUPS] = (
self._get_security_groups_on_port(context, port))
self._process_port_create_security_group(
context, port_data, port_data[ext_sg.SECURITYGROUPS])
# QoS extension checks
port_queue_id = self._check_for_queue_and_create(
context, port_data)
self._process_port_queue_mapping(
context, port_data, port_queue_id)
if (isinstance(port_data.get(mac_ext.MAC_LEARNING), bool)):
self._create_mac_learning_state(context, port_data)
elif mac_ext.MAC_LEARNING in port_data:
port_data.pop(mac_ext.MAC_LEARNING)
self._process_portbindings_create_and_update(context,
port['port'],
port_data)
# For some reason the port bindings DB mixin does not handle
# the VNIC_TYPE attribute, which is required by nova for
# setting up VIFs.
context.session.flush()
port_data[pbin.VNIC_TYPE] = pbin.VNIC_NORMAL
# DB Operation is complete, perform NSX operation
try:
port_data = port['port'].copy()
port_create_func = self._port_drivers['create'].get(
port_data['device_owner'],
self._port_drivers['create']['default'])
port_create_func(context, port_data)
LOG.debug("port created on NSX backend for tenant "
"%(tenant_id)s: (%(id)s)", port_data)
except n_exc.NotFound:
LOG.warning(_LW("Logical switch for network %s was not "
"found in NSX."), port_data['network_id'])
# Put port in error on neutron DB
with context.session.begin(subtransactions=True):
port = self._get_port(context, neutron_port_id)
port_data['status'] = constants.PORT_STATUS_ERROR
port['status'] = port_data['status']
context.session.add(port)
except Exception:
# Port must be removed from neutron DB
with excutils.save_and_reraise_exception():
LOG.error(_LE("Unable to create port or set port "
"attachment in NSX."))
with context.session.begin(subtransactions=True):
self._delete_port(context, neutron_port_id)
self.handle_port_dhcp_access(context, port_data, action='create_port')
return port_data
def update_port(self, context, id, port):
delete_security_groups = self._check_update_deletes_security_groups(
port)
has_security_groups = self._check_update_has_security_groups(port)
delete_addr_pairs = self._check_update_deletes_allowed_address_pairs(
port)
has_addr_pairs = self._check_update_has_allowed_address_pairs(port)
with context.session.begin(subtransactions=True):
ret_port = super(NsxPluginV2, self).update_port(
context, id, port)
# Save current mac learning state to check whether it's
# being updated or not
old_mac_learning_state = ret_port.get(mac_ext.MAC_LEARNING)
# copy values over - except fixed_ips as
# they've already been processed
port['port'].pop('fixed_ips', None)
ret_port.update(port['port'])
tenant_id = self._get_tenant_id_for_create(context, ret_port)
# populate port_security setting
if psec.PORTSECURITY not in port['port']:
ret_port[psec.PORTSECURITY] = self._get_port_security_binding(
context, id)
has_ip = self._ip_on_port(ret_port)
# validate port security and allowed address pairs
if not ret_port[psec.PORTSECURITY]:
# has address pairs in request
if has_addr_pairs:
raise addr_pair.AddressPairAndPortSecurityRequired()
elif not delete_addr_pairs:
# check if address pairs are in db
ret_port[addr_pair.ADDRESS_PAIRS] = (
self.get_allowed_address_pairs(context, id))
if ret_port[addr_pair.ADDRESS_PAIRS]:
raise addr_pair.AddressPairAndPortSecurityRequired()
if (delete_addr_pairs or has_addr_pairs):
                # delete address pairs and re-add them
self._delete_allowed_address_pairs(context, id)
self._process_create_allowed_address_pairs(
context, ret_port, ret_port[addr_pair.ADDRESS_PAIRS])
# checks if security groups were updated adding/modifying
# security groups, port security is set and port has ip
if not (has_ip and ret_port[psec.PORTSECURITY]):
if has_security_groups:
raise psec.PortSecurityAndIPRequiredForSecurityGroups()
# Update did not have security groups passed in. Check
# that port does not have any security groups already on it.
filters = {'port_id': [id]}
security_groups = (
super(NsxPluginV2, self)._get_port_security_group_bindings(
context, filters)
)
if security_groups and not delete_security_groups:
raise psec.PortSecurityPortHasSecurityGroup()
if (delete_security_groups or has_security_groups):
                # delete the port security group bindings and re-create
                # them with the new rules.
self._delete_port_security_group_bindings(context, id)
sgids = self._get_security_groups_on_port(context, port)
self._process_port_create_security_group(context, ret_port,
sgids)
if psec.PORTSECURITY in port['port']:
self._process_port_port_security_update(
context, port['port'], ret_port)
port_queue_id = self._check_for_queue_and_create(
context, ret_port)
# Populate the mac learning attribute
new_mac_learning_state = port['port'].get(mac_ext.MAC_LEARNING)
if (new_mac_learning_state is not None and
old_mac_learning_state != new_mac_learning_state):
self._update_mac_learning_state(context, id,
new_mac_learning_state)
ret_port[mac_ext.MAC_LEARNING] = new_mac_learning_state
self._delete_port_queue_mapping(context, ret_port['id'])
self._process_port_queue_mapping(context, ret_port,
port_queue_id)
LOG.debug("Updating port: %s", port)
nsx_switch_id, nsx_port_id = nsx_utils.get_nsx_switch_and_port_id(
context.session, self.cluster, id)
# Convert Neutron security groups identifiers into NSX security
# profiles identifiers
nsx_sec_profile_ids = [
nsx_utils.get_nsx_security_group_id(
context.session, self.cluster, neutron_sg_id) for
neutron_sg_id in (ret_port[ext_sg.SECURITYGROUPS] or [])]
if nsx_port_id:
try:
switchlib.update_port(
self.cluster, nsx_switch_id, nsx_port_id,
id, tenant_id,
ret_port['name'],
ret_port['device_id'],
ret_port['admin_state_up'],
ret_port['mac_address'],
ret_port['fixed_ips'],
ret_port[psec.PORTSECURITY],
nsx_sec_profile_ids,
ret_port[qos.QUEUE],
ret_port.get(mac_ext.MAC_LEARNING),
ret_port.get(addr_pair.ADDRESS_PAIRS))
# Update the port status from nsx. If we fail here hide it
# since the port was successfully updated but we were not
# able to retrieve the status.
ret_port['status'] = switchlib.get_port_status(
self.cluster, nsx_switch_id,
nsx_port_id)
# FIXME(arosen) improve exception handling.
except Exception:
ret_port['status'] = constants.PORT_STATUS_ERROR
LOG.exception(_LE("Unable to update port id: %s."),
nsx_port_id)
# If nsx_port_id is not in database or in nsx put in error state.
else:
ret_port['status'] = constants.PORT_STATUS_ERROR
self._process_portbindings_create_and_update(context,
port['port'],
ret_port)
return ret_port
def delete_port(self, context, id, l3_port_check=True,
nw_gw_port_check=True):
"""Deletes a port on a specified Virtual Network.
If the port contains a remote interface attachment, the remote
interface is first un-plugged and then the port is deleted.
:returns: None
:raises: exception.PortInUse
:raises: exception.PortNotFound
:raises: exception.NetworkNotFound
"""
# if needed, check to see if this is a port owned by
# a l3 router. If so, we should prevent deletion here
if l3_port_check:
self.prevent_l3_port_deletion(context, id)
neutron_db_port = self.get_port(context, id)
# Perform the same check for ports owned by layer-2 gateways
if nw_gw_port_check:
self.prevent_network_gateway_port_deletion(context,
neutron_db_port)
port_delete_func = self._port_drivers['delete'].get(
neutron_db_port['device_owner'],
self._port_drivers['delete']['default'])
port_delete_func(context, neutron_db_port)
self.disassociate_floatingips(context, id)
with context.session.begin(subtransactions=True):
queue = self._get_port_queue_bindings(context, {'port_id': [id]})
# metadata_dhcp_host_route
self.handle_port_metadata_access(
context, neutron_db_port, is_delete=True)
super(NsxPluginV2, self).delete_port(context, id)
# Delete qos queue if possible
if queue:
self.delete_qos_queue(context, queue[0]['queue_id'], False)
self.handle_port_dhcp_access(
context, neutron_db_port, action='delete_port')
def get_port(self, context, id, fields=None):
with context.session.begin(subtransactions=True):
if (self.nsx_sync_opts.always_read_status or
fields and 'status' in fields):
# Perform explicit state synchronization
db_port = self._get_port(context, id)
self._synchronizer.synchronize_port(
context, db_port)
return self._make_port_dict(db_port, fields)
else:
return super(NsxPluginV2, self).get_port(context, id, fields)
def get_router(self, context, id, fields=None):
if (self.nsx_sync_opts.always_read_status or
fields and 'status' in fields):
db_router = self._get_router(context, id)
# Perform explicit state synchronization
self._synchronizer.synchronize_router(
context, db_router)
return self._make_router_dict(db_router, fields)
else:
return super(NsxPluginV2, self).get_router(context, id, fields)
def _create_lrouter(self, context, router, nexthop):
tenant_id = self._get_tenant_id_for_create(context, router)
distributed = router.get('distributed')
try:
lrouter = routerlib.create_lrouter(
self.cluster, router['id'],
tenant_id, router['name'], nexthop,
distributed=attr.is_attr_set(distributed) and distributed)
except nsx_exc.InvalidVersion:
msg = _("Cannot create a distributed router with the NSX "
"platform currently in execution. Please, try "
"without specifying the 'distributed' attribute.")
LOG.exception(msg)
raise n_exc.BadRequest(resource='router', msg=msg)
except api_exc.NsxApiException:
err_msg = _("Unable to create logical router on NSX Platform")
LOG.exception(err_msg)
raise nsx_exc.NsxPluginException(err_msg=err_msg)
# Create the port here - and update it later if we have gw_info
try:
self._create_and_attach_router_port(
self.cluster, context, lrouter['uuid'], {'fake_ext_gw': True},
"L3GatewayAttachment",
self.cluster.default_l3_gw_service_uuid)
except nsx_exc.NsxPluginException:
LOG.exception(_LE("Unable to create L3GW port on logical router "
"%(router_uuid)s. Verify Default Layer-3 "
"Gateway service %(def_l3_gw_svc)s id is "
"correct"),
{'router_uuid': lrouter['uuid'],
'def_l3_gw_svc':
self.cluster.default_l3_gw_service_uuid})
# Try and remove logical router from NSX
routerlib.delete_lrouter(self.cluster, lrouter['uuid'])
            # Return a 500 to the user with an appropriate message
raise nsx_exc.NsxPluginException(
err_msg=(_("Unable to create router %s on NSX backend") %
router['id']))
lrouter['status'] = plugin_const.ACTIVE
return lrouter
def create_router(self, context, router):
# NOTE(salvatore-orlando): We completely override this method in
# order to be able to use the NSX ID as Neutron ID
# TODO(salvatore-orlando): Propose upstream patch for allowing
# 3rd parties to specify IDs as we do with l2 plugin
r = router['router']
has_gw_info = False
tenant_id = self._get_tenant_id_for_create(context, r)
# default value to set - nsx wants it (even if we don't have it)
nexthop = NSX_DEFAULT_NEXTHOP
# if external gateway info are set, then configure nexthop to
# default external gateway
if 'external_gateway_info' in r and r.get('external_gateway_info'):
has_gw_info = True
gw_info = r['external_gateway_info']
del r['external_gateway_info']
# The following DB read will be performed again when updating
# gateway info. This is not great, but still better than
# creating NSX router here and updating it later
network_id = (gw_info.get('network_id', None) if gw_info
else None)
if network_id:
ext_net = self._get_network(context, network_id)
if not ext_net.external:
msg = (_("Network '%s' is not a valid external "
"network") % network_id)
raise n_exc.BadRequest(resource='router', msg=msg)
if ext_net.subnets:
ext_subnet = ext_net.subnets[0]
nexthop = ext_subnet.gateway_ip
# NOTE(salv-orlando): Pre-generating uuid for Neutron
# router. This will be removed once the router create operation
# becomes an asynchronous task
neutron_router_id = str(uuid.uuid4())
r['id'] = neutron_router_id
lrouter = self._create_lrouter(context, r, nexthop)
# Update 'distributed' with value returned from NSX
# This will be useful for setting the value if the API request
# did not specify any value for the 'distributed' attribute
# Platforms older than 3.x do not support the attribute
r['distributed'] = lrouter.get('distributed', False)
# TODO(salv-orlando): Deal with backend object removal in case
# of db failures
with context.session.begin(subtransactions=True):
# Transaction nesting is needed to avoid foreign key violations
# when processing the distributed router binding
with context.session.begin(subtransactions=True):
router_db = l3_db.Router(id=neutron_router_id,
tenant_id=tenant_id,
name=r['name'],
admin_state_up=r['admin_state_up'],
status=lrouter['status'])
context.session.add(router_db)
self._process_extra_attr_router_create(context, router_db, r)
# Ensure neutron router is moved into the transaction's buffer
context.session.flush()
# Add mapping between neutron and nsx identifiers
nsx_db.add_neutron_nsx_router_mapping(
context.session, router_db['id'], lrouter['uuid'])
if has_gw_info:
# NOTE(salv-orlando): This operation has been moved out of the
# database transaction since it performs several NSX queries,
            # thus increasing the risk of deadlocks between eventlet and
# sqlalchemy operations.
# Set external gateway and remove router in case of failure
try:
self._update_router_gw_info(context, router_db['id'], gw_info)
except (n_exc.NeutronException, api_exc.NsxApiException):
with excutils.save_and_reraise_exception():
# As setting gateway failed, the router must be deleted
# in order to ensure atomicity
router_id = router_db['id']
LOG.warn(_LW("Failed to set gateway info for router being "
"created:%s - removing router"), router_id)
self.delete_router(context, router_id)
LOG.info(_LI("Create router failed while setting external "
"gateway. Router:%s has been removed from "
"DB and backend"),
router_id)
return self._make_router_dict(router_db)
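    # Request sketch (hypothetical body): a router create such as
    #   {'router': {'name': 'r1', 'admin_state_up': True,
    #               'external_gateway_info': {'network_id': '<ext-net-id>'}}}
    # makes the code above resolve the first subnet of that external network
    # and use its gateway_ip as the NSX default route nexthop; without
    # gateway info the NSX_DEFAULT_NEXTHOP placeholder is used instead.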
def _update_lrouter(self, context, router_id, name, nexthop, routes=None):
nsx_router_id = nsx_utils.get_nsx_router_id(
context.session, self.cluster, router_id)
return routerlib.update_lrouter(
self.cluster, nsx_router_id, name,
nexthop, routes=routes)
def _update_lrouter_routes(self, context, router_id, routes):
nsx_router_id = nsx_utils.get_nsx_router_id(
context.session, self.cluster, router_id)
routerlib.update_explicit_routes_lrouter(
self.cluster, nsx_router_id, routes)
def update_router(self, context, router_id, router):
# Either nexthop is updated or should be kept as it was before
r = router['router']
nexthop = None
if 'external_gateway_info' in r and r.get('external_gateway_info'):
gw_info = r['external_gateway_info']
# The following DB read will be performed again when updating
# gateway info. This is not great, but still better than
# creating NSX router here and updating it later
network_id = (gw_info.get('network_id', None) if gw_info
else None)
if network_id:
ext_net = self._get_network(context, network_id)
if not ext_net.external:
msg = (_("Network '%s' is not a valid external "
"network") % network_id)
raise n_exc.BadRequest(resource='router', msg=msg)
if ext_net.subnets:
ext_subnet = ext_net.subnets[0]
nexthop = ext_subnet.gateway_ip
try:
for route in r.get('routes', []):
if route['destination'] == '0.0.0.0/0':
msg = _("'routes' cannot contain route '0.0.0.0/0', "
"this must be updated through the default "
"gateway attribute")
raise n_exc.BadRequest(resource='router', msg=msg)
previous_routes = self._update_lrouter(
context, router_id, r.get('name'),
nexthop, routes=r.get('routes'))
# NOTE(salv-orlando): The exception handling below is not correct, but
# unfortunately nsxlib raises a neutron notfound exception when an
# object is not found in the underlying backend
except n_exc.NotFound:
# Put the router in ERROR status
with context.session.begin(subtransactions=True):
router_db = self._get_router(context, router_id)
router_db['status'] = constants.NET_STATUS_ERROR
raise nsx_exc.NsxPluginException(
err_msg=_("Logical router %s not found "
"on NSX Platform") % router_id)
except api_exc.NsxApiException:
raise nsx_exc.NsxPluginException(
err_msg=_("Unable to update logical router on NSX Platform"))
except nsx_exc.InvalidVersion:
msg = _("Request cannot contain 'routes' with the NSX "
"platform currently in execution. Please, try "
"without specifying the static routes.")
LOG.exception(msg)
raise n_exc.BadRequest(resource='router', msg=msg)
try:
return super(NsxPluginV2, self).update_router(context,
router_id, router)
except (extraroute.InvalidRoutes,
extraroute.RouterInterfaceInUseByRoute,
extraroute.RoutesExhausted):
with excutils.save_and_reraise_exception():
# revert changes made to NSX
self._update_lrouter_routes(
context, router_id, previous_routes)
def _delete_lrouter(self, context, router_id, nsx_router_id):
# The neutron router id (router_id) is ignored in this routine,
# but used in plugins deriving from this one
routerlib.delete_lrouter(self.cluster, nsx_router_id)
def delete_router(self, context, router_id):
with context.session.begin(subtransactions=True):
# TODO(salv-orlando): This call should have no effect on delete
# router, but if it does, it should not happen within a
# transaction, and it should be restored on rollback
self.handle_router_metadata_access(
context, router_id, interface=None)
# Pre-delete checks
# NOTE(salv-orlando): These checks will be repeated anyway when
# calling the superclass. This is wasteful, but is the simplest
# way of ensuring a consistent removal of the router both in
# the neutron Database and in the NSX backend.
# TODO(salv-orlando): split pre-delete checks and actual
# deletion in superclass.
# Ensure that the router is not used
fips = self.get_floatingips_count(
context.elevated(), filters={'router_id': [router_id]})
if fips:
raise l3.RouterInUse(router_id=router_id)
device_filter = {'device_id': [router_id],
'device_owner': [l3_db.DEVICE_OWNER_ROUTER_INTF]}
ports = self._core_plugin.get_ports_count(context.elevated(),
filters=device_filter)
if ports:
raise l3.RouterInUse(router_id=router_id)
nsx_router_id = nsx_utils.get_nsx_router_id(
context.session, self.cluster, router_id)
# It is safe to remove the router from the database, so remove it
# from the backend
try:
self._delete_lrouter(context, router_id, nsx_router_id)
except n_exc.NotFound:
# This is not a fatal error, but needs to be logged
LOG.warning(_LW("Logical router '%s' not found "
"on NSX Platform"), router_id)
except api_exc.NsxApiException:
raise nsx_exc.NsxPluginException(
err_msg=(_("Unable to delete logical router '%s' "
"on NSX Platform") % nsx_router_id))
# Remove the NSX mapping first in order to ensure a mapping to
# a non-existent NSX router is not left in the DB in case of
# failure while removing the router from the neutron DB
try:
nsx_db.delete_neutron_nsx_router_mapping(
context.session, router_id)
except db_exc.DBError as d_exc:
# Do not make this error fatal
LOG.warn(_LW("Unable to remove NSX mapping for Neutron router "
"%(router_id)s because of the following exception:"
"%(d_exc)s"), {'router_id': router_id,
'd_exc': str(d_exc)})
# Perform the actual delete on the Neutron DB
super(NsxPluginV2, self).delete_router(context, router_id)
def _add_subnet_snat_rule(self, context, router, subnet):
gw_port = router.gw_port
if gw_port and router.enable_snat:
            # There is a chance gw_port might have multiple IPs
# In that case we will consider only the first one
if gw_port.get('fixed_ips'):
snat_ip = gw_port['fixed_ips'][0]['ip_address']
cidr_prefix = int(subnet['cidr'].split('/')[1])
nsx_router_id = nsx_utils.get_nsx_router_id(
context.session, self.cluster, router['id'])
routerlib.create_lrouter_snat_rule(
self.cluster, nsx_router_id, snat_ip, snat_ip,
order=NSX_EXTGW_NAT_RULES_ORDER - cidr_prefix,
match_criteria={'source_ip_addresses': subnet['cidr']})
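    # Worked example (values assumed for illustration): for a subnet with
    # cidr '10.0.1.0/24' the cidr_prefix is 24, so assuming
    # NSX_EXTGW_NAT_RULES_ORDER is 255 the SNAT rule above gets order
    # 255 - 24 = 231, while a /28 subnet would get 227; longer prefixes thus
    # receive lower order values so that more specific subnets are matched
    # ahead of less specific ones on the NSX router.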
def _delete_subnet_snat_rule(self, context, router, subnet):
# Remove SNAT rule if external gateway is configured
if router.gw_port:
nsx_router_id = nsx_utils.get_nsx_router_id(
context.session, self.cluster, router['id'])
routerlib.delete_nat_rules_by_match(
self.cluster, nsx_router_id, "SourceNatRule",
max_num_expected=1, min_num_expected=1,
raise_on_len_mismatch=False,
source_ip_addresses=subnet['cidr'])
def add_router_interface(self, context, router_id, interface_info):
# When adding interface by port_id we need to create the
# peer port on the nsx logical router in this routine
port_id = interface_info.get('port_id')
router_iface_info = super(NsxPluginV2, self).add_router_interface(
context, router_id, interface_info)
# router_iface_info will always have a subnet_id attribute
subnet_id = router_iface_info['subnet_id']
nsx_router_id = nsx_utils.get_nsx_router_id(
context.session, self.cluster, router_id)
if port_id:
port_data = self.get_port(context, port_id)
# If security groups are present we need to remove them as
# this is a router port and disable port security.
if port_data['security_groups']:
self.update_port(context, port_id,
{'port': {'security_groups': [],
psec.PORTSECURITY: False}})
nsx_switch_id, nsx_port_id = nsx_utils.get_nsx_switch_and_port_id(
context.session, self.cluster, port_id)
# Unplug current attachment from lswitch port
switchlib.plug_vif_interface(self.cluster, nsx_switch_id,
nsx_port_id, "NoAttachment")
# Create logical router port and plug patch attachment
self._create_and_attach_router_port(
self.cluster, context, nsx_router_id, port_data,
"PatchAttachment", nsx_port_id, subnet_ids=[subnet_id])
subnet = self._get_subnet(context, subnet_id)
# If there is an external gateway we need to configure the SNAT rule.
# Fetch router from DB
router = self._get_router(context, router_id)
self._add_subnet_snat_rule(context, router, subnet)
routerlib.create_lrouter_nosnat_rule(
self.cluster, nsx_router_id,
order=NSX_NOSNAT_RULES_ORDER,
match_criteria={'destination_ip_addresses': subnet['cidr']})
# Ensure the NSX logical router has a connection to a 'metadata access'
# network (with a proxy listening on its DHCP port), by creating it
# if needed.
self.handle_router_metadata_access(
context, router_id, interface=router_iface_info)
LOG.debug("Add_router_interface completed for subnet:%(subnet_id)s "
"and router:%(router_id)s",
{'subnet_id': subnet_id, 'router_id': router_id})
return router_iface_info
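    # Interface sketch (hypothetical payloads): interface_info is either
    #   {'port_id': '<neutron-port-id>'}  or  {'subnet_id': '<subnet-id>'};
    # with a port_id the existing lswitch port is unplugged and re-attached
    # to the NSX logical router as a patch port, and in both cases a SNAT
    # rule (when a gateway is configured) plus a no-SNAT rule for the subnet
    # CIDR are pushed to the backend router.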
def remove_router_interface(self, context, router_id, interface_info):
        # The code below is duplicated from the base class, but comes in handy
# as we need to retrieve the router port id before removing the port
subnet = None
subnet_id = None
if 'port_id' in interface_info:
port_id = interface_info['port_id']
            # find subnet_id - it is needed for removing the SNAT rule
port = self._get_port(context, port_id)
if port.get('fixed_ips'):
subnet_id = port['fixed_ips'][0]['subnet_id']
if not (port['device_owner'] == l3_db.DEVICE_OWNER_ROUTER_INTF and
port['device_id'] == router_id):
raise l3.RouterInterfaceNotFound(router_id=router_id,
port_id=port_id)
elif 'subnet_id' in interface_info:
subnet_id = interface_info['subnet_id']
subnet = self._get_subnet(context, subnet_id)
rport_qry = context.session.query(models_v2.Port)
ports = rport_qry.filter_by(
device_id=router_id,
device_owner=l3_db.DEVICE_OWNER_ROUTER_INTF,
network_id=subnet['network_id'])
for p in ports:
if p['fixed_ips'][0]['subnet_id'] == subnet_id:
port_id = p['id']
break
else:
raise l3.RouterInterfaceNotFoundForSubnet(router_id=router_id,
subnet_id=subnet_id)
# Finally remove the data from the Neutron DB
# This will also destroy the port on the logical switch
info = super(NsxPluginV2, self).remove_router_interface(
context, router_id, interface_info)
try:
# Ensure the connection to the 'metadata access network'
            # is removed (with the network) if this is the last subnet
# on the router
self.handle_router_metadata_access(
context, router_id, interface=info)
if not subnet:
subnet = self._get_subnet(context, subnet_id)
router = self._get_router(context, router_id)
# If router is enabled_snat = False there are no snat rules to
# delete.
if router.enable_snat:
self._delete_subnet_snat_rule(context, router, subnet)
# Relax the minimum expected number as the nosnat rules
# do not exist in 2.x deployments
nsx_router_id = nsx_utils.get_nsx_router_id(
context.session, self.cluster, router_id)
routerlib.delete_nat_rules_by_match(
self.cluster, nsx_router_id, "NoSourceNatRule",
max_num_expected=1, min_num_expected=0,
raise_on_len_mismatch=False,
destination_ip_addresses=subnet['cidr'])
except n_exc.NotFound:
LOG.error(_LE("Logical router resource %s not found "
"on NSX platform"), router_id)
except api_exc.NsxApiException:
raise nsx_exc.NsxPluginException(
err_msg=(_("Unable to update logical router"
"on NSX Platform")))
return info
def _retrieve_and_delete_nat_rules(self, context, floating_ip_address,
internal_ip, nsx_router_id,
min_num_rules_expected=0):
"""Finds and removes NAT rules from a NSX router."""
# NOTE(salv-orlando): The context parameter is ignored in this method
# but used by derived classes
try:
# Remove DNAT rule for the floating IP
routerlib.delete_nat_rules_by_match(
self.cluster, nsx_router_id, "DestinationNatRule",
max_num_expected=1,
min_num_expected=min_num_rules_expected,
destination_ip_addresses=floating_ip_address)
# Remove SNAT rules for the floating IP
routerlib.delete_nat_rules_by_match(
self.cluster, nsx_router_id, "SourceNatRule",
max_num_expected=1,
min_num_expected=min_num_rules_expected,
source_ip_addresses=internal_ip)
routerlib.delete_nat_rules_by_match(
self.cluster, nsx_router_id, "SourceNatRule",
max_num_expected=1,
min_num_expected=min_num_rules_expected,
destination_ip_addresses=internal_ip)
except api_exc.NsxApiException:
with excutils.save_and_reraise_exception():
LOG.exception(_LE("An error occurred while removing NAT rules "
"on the NSX platform for floating ip:%s"),
floating_ip_address)
except nsx_exc.NatRuleMismatch:
# Do not surface to the user
LOG.warning(_LW("An incorrect number of matching NAT rules "
"was found on the NSX platform"))
def _remove_floatingip_address(self, context, fip_db):
# Remove floating IP address from logical router port
# Fetch logical port of router's external gateway
router_id = fip_db.router_id
nsx_router_id = nsx_utils.get_nsx_router_id(
context.session, self.cluster, router_id)
nsx_gw_port_id = routerlib.find_router_gw_port(
context, self.cluster, nsx_router_id)['uuid']
ext_neutron_port_db = self._get_port(context.elevated(),
fip_db.floating_port_id)
nsx_floating_ips = self._build_ip_address_list(
context.elevated(), ext_neutron_port_db['fixed_ips'])
routerlib.update_lrouter_port_ips(self.cluster,
nsx_router_id,
nsx_gw_port_id,
ips_to_add=[],
ips_to_remove=nsx_floating_ips)
def _get_fip_assoc_data(self, context, fip, floatingip_db):
if (('fixed_ip_address' in fip and fip['fixed_ip_address']) and
not ('port_id' in fip and fip['port_id'])):
msg = _("fixed_ip_address cannot be specified without a port_id")
raise n_exc.BadRequest(resource='floatingip', msg=msg)
port_id = internal_ip = router_id = None
if 'port_id' in fip and fip['port_id']:
fip_qry = context.session.query(l3_db.FloatingIP)
port_id, internal_ip, router_id = self.get_assoc_data(
context,
fip,
floatingip_db['floating_network_id'])
try:
fip_qry.filter_by(
fixed_port_id=fip['port_id'],
floating_network_id=floatingip_db['floating_network_id'],
fixed_ip_address=internal_ip).one()
raise l3.FloatingIPPortAlreadyAssociated(
port_id=fip['port_id'],
fip_id=floatingip_db['id'],
floating_ip_address=floatingip_db['floating_ip_address'],
fixed_ip=floatingip_db['fixed_ip_address'],
net_id=floatingip_db['floating_network_id'])
except sa_exc.NoResultFound:
pass
return (port_id, internal_ip, router_id)
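    # Validation sketch (hypothetical request): a floating IP body that sets
    # fixed_ip_address but no port_id is rejected with BadRequest by the
    # check above, and associating a port whose fixed IP already backs
    # another floating IP on the same external network raises
    # FloatingIPPortAlreadyAssociated.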
def _floatingip_status(self, floatingip_db, associated):
if (associated and
floatingip_db['status'] != constants.FLOATINGIP_STATUS_ACTIVE):
return constants.FLOATINGIP_STATUS_ACTIVE
elif (not associated and
floatingip_db['status'] != constants.FLOATINGIP_STATUS_DOWN):
return constants.FLOATINGIP_STATUS_DOWN
# in any case ensure the status is not reset by this method!
return floatingip_db['status']
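    # Status transition sketch: associated and not yet ACTIVE -> ACTIVE;
    # not associated and not yet DOWN -> DOWN; otherwise the stored status
    # is returned unchanged.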
def _update_fip_assoc(self, context, fip, floatingip_db, external_port):
"""Update floating IP association data.
Overrides method from base class.
The method is augmented for creating NAT rules in the process.
"""
# Store router currently serving the floating IP
old_router_id = floatingip_db.router_id
port_id, internal_ip, router_id = self._get_fip_assoc_data(
context, fip, floatingip_db)
floating_ip = floatingip_db['floating_ip_address']
# If there's no association router_id will be None
if router_id:
nsx_router_id = nsx_utils.get_nsx_router_id(
context.session, self.cluster, router_id)
self._retrieve_and_delete_nat_rules(
context, floating_ip, internal_ip, nsx_router_id)
# Fetch logical port of router's external gateway
nsx_floating_ips = self._build_ip_address_list(
context.elevated(), external_port['fixed_ips'])
floating_ip = floatingip_db['floating_ip_address']
# Retrieve and delete existing NAT rules, if any
if old_router_id:
nsx_old_router_id = nsx_utils.get_nsx_router_id(
context.session, self.cluster, old_router_id)
# Retrieve the current internal ip
_p, _s, old_internal_ip = self._internal_fip_assoc_data(
context, {'id': floatingip_db.id,
'port_id': floatingip_db.fixed_port_id,
'fixed_ip_address': floatingip_db.fixed_ip_address,
'tenant_id': floatingip_db.tenant_id})
nsx_gw_port_id = routerlib.find_router_gw_port(
context, self.cluster, nsx_old_router_id)['uuid']
self._retrieve_and_delete_nat_rules(
context, floating_ip, old_internal_ip, nsx_old_router_id)
routerlib.update_lrouter_port_ips(
self.cluster, nsx_old_router_id, nsx_gw_port_id,
ips_to_add=[], ips_to_remove=nsx_floating_ips)
if router_id:
nsx_gw_port_id = routerlib.find_router_gw_port(
context, self.cluster, nsx_router_id)['uuid']
# Re-create NAT rules only if a port id is specified
if fip.get('port_id'):
try:
# Setup DNAT rules for the floating IP
routerlib.create_lrouter_dnat_rule(
self.cluster, nsx_router_id, internal_ip,
order=NSX_FLOATINGIP_NAT_RULES_ORDER,
match_criteria={'destination_ip_addresses':
floating_ip})
# Setup SNAT rules for the floating IP
# Create a SNAT rule for enabling connectivity to the
# floating IP from the same network as the internal port
# Find subnet id for internal_ip from fixed_ips
internal_port = self._get_port(context, port_id)
                    # Checks not needed on statements below since otherwise
# _internal_fip_assoc_data would have raised
subnet_ids = [ip['subnet_id'] for ip in
internal_port['fixed_ips'] if
ip['ip_address'] == internal_ip]
internal_subnet_cidr = self._build_ip_address_list(
context, internal_port['fixed_ips'],
subnet_ids=subnet_ids)[0]
routerlib.create_lrouter_snat_rule(
self.cluster, nsx_router_id, floating_ip, floating_ip,
order=NSX_NOSNAT_RULES_ORDER - 1,
match_criteria={'source_ip_addresses':
internal_subnet_cidr,
'destination_ip_addresses':
internal_ip})
                    # setup SNAT rule such that the source IP of an IP packet
                    # using the floating IP is the floating IP itself.
routerlib.create_lrouter_snat_rule(
self.cluster, nsx_router_id, floating_ip, floating_ip,
order=NSX_FLOATINGIP_NAT_RULES_ORDER,
match_criteria={'source_ip_addresses': internal_ip})
# Add Floating IP address to router_port
routerlib.update_lrouter_port_ips(
self.cluster, nsx_router_id, nsx_gw_port_id,
ips_to_add=nsx_floating_ips, ips_to_remove=[])
except api_exc.NsxApiException:
LOG.exception(_LE("An error occurred while creating NAT "
"rules on the NSX platform for floating "
"ip:%(floating_ip)s mapped to "
"internal ip:%(internal_ip)s"),
{'floating_ip': floating_ip,
'internal_ip': internal_ip})
msg = _("Failed to update NAT rules for floatingip update")
raise nsx_exc.NsxPluginException(err_msg=msg)
# Update also floating ip status (no need to call base class method)
floatingip_db.update(
{'fixed_ip_address': internal_ip,
'fixed_port_id': port_id,
'router_id': router_id,
'status': self._floatingip_status(floatingip_db, router_id)})
def delete_floatingip(self, context, id):
fip_db = self._get_floatingip(context, id)
# Check whether the floating ip is associated or not
if fip_db.fixed_port_id:
nsx_router_id = nsx_utils.get_nsx_router_id(
context.session, self.cluster, fip_db.router_id)
self._retrieve_and_delete_nat_rules(context,
fip_db.floating_ip_address,
fip_db.fixed_ip_address,
nsx_router_id,
min_num_rules_expected=1)
# Remove floating IP address from logical router port
self._remove_floatingip_address(context, fip_db)
return super(NsxPluginV2, self).delete_floatingip(context, id)
def disassociate_floatingips(self, context, port_id):
try:
fip_qry = context.session.query(l3_db.FloatingIP)
fip_dbs = fip_qry.filter_by(fixed_port_id=port_id)
for fip_db in fip_dbs:
nsx_router_id = nsx_utils.get_nsx_router_id(
context.session, self.cluster, fip_db.router_id)
self._retrieve_and_delete_nat_rules(context,
fip_db.floating_ip_address,
fip_db.fixed_ip_address,
nsx_router_id,
min_num_rules_expected=1)
self._remove_floatingip_address(context, fip_db)
except sa_exc.NoResultFound:
LOG.debug("The port '%s' is not associated with floating IPs",
port_id)
except n_exc.NotFound:
LOG.warning(_LW("Nat rules not found in nsx for port: %s"), id)
# NOTE(ihrachys): L3 agent notifications don't make sense for
# NSX VMWare plugin since there is no L3 agent in such setup, so
# disabling them here.
super(NsxPluginV2, self).disassociate_floatingips(
context, port_id, do_notify=False)
def create_network_gateway(self, context, network_gateway):
"""Create a layer-2 network gateway.
Create the gateway service on NSX platform and corresponding data
        structures in the Neutron database.
"""
gw_data = network_gateway[networkgw.GATEWAY_RESOURCE_NAME]
tenant_id = self._get_tenant_id_for_create(context, gw_data)
# Ensure the default gateway in the config file is in sync with the db
self._ensure_default_network_gateway()
# Validate provided gateway device list
self._validate_device_list(context, tenant_id, gw_data)
devices = gw_data['devices']
# Populate default physical network where not specified
for device in devices:
if not device.get('interface_name'):
device['interface_name'] = self.cluster.default_interface_name
try:
# Replace Neutron device identifiers with NSX identifiers
dev_map = dict((dev['id'], dev['interface_name']) for
dev in devices)
nsx_devices = []
for db_device in self._query_gateway_devices(
context, filters={'id': [device['id'] for device in devices]}):
nsx_devices.append(
{'id': db_device['nsx_id'],
'interface_name': dev_map[db_device['id']]})
nsx_res = l2gwlib.create_l2_gw_service(
self.cluster, tenant_id, gw_data['name'], nsx_devices)
nsx_uuid = nsx_res.get('uuid')
except api_exc.Conflict:
raise nsx_exc.L2GatewayAlreadyInUse(gateway=gw_data['name'])
except api_exc.NsxApiException:
err_msg = _("Unable to create l2_gw_service for: %s") % gw_data
LOG.exception(err_msg)
raise nsx_exc.NsxPluginException(err_msg=err_msg)
gw_data['id'] = nsx_uuid
return super(NsxPluginV2, self).create_network_gateway(
context, network_gateway, validate_device_list=False)
def delete_network_gateway(self, context, gateway_id):
"""Remove a layer-2 network gateway.
Remove the gateway service from NSX platform and corresponding data
        structures in the Neutron database.
"""
# Ensure the default gateway in the config file is in sync with the db
self._ensure_default_network_gateway()
with context.session.begin(subtransactions=True):
try:
super(NsxPluginV2, self).delete_network_gateway(
context, gateway_id)
l2gwlib.delete_l2_gw_service(self.cluster, gateway_id)
except api_exc.ResourceNotFound:
# Do not cause a 500 to be returned to the user if
# the corresponding NSX resource does not exist
LOG.exception(_LE("Unable to remove gateway service from "
"NSX plaform - the resource was not found"))
def get_network_gateway(self, context, id, fields=None):
# Ensure the default gateway in the config file is in sync with the db
self._ensure_default_network_gateway()
return super(NsxPluginV2, self).get_network_gateway(context,
id, fields)
def get_network_gateways(self, context, filters=None, fields=None,
sorts=None, limit=None, marker=None,
page_reverse=False):
# Ensure the default gateway in the config file is in sync with the db
self._ensure_default_network_gateway()
# Ensure the tenant_id attribute is populated on returned gateways
return super(NsxPluginV2, self).get_network_gateways(
context, filters, fields, sorts, limit, marker, page_reverse)
def update_network_gateway(self, context, id, network_gateway):
# Ensure the default gateway in the config file is in sync with the db
self._ensure_default_network_gateway()
# Update gateway on backend when there's a name change
name = network_gateway[networkgw.GATEWAY_RESOURCE_NAME].get('name')
if name:
try:
l2gwlib.update_l2_gw_service(self.cluster, id, name)
except api_exc.NsxApiException:
# Consider backend failures as non-fatal, but still warn
# because this might indicate something dodgy is going on
LOG.warn(_LW("Unable to update name on NSX backend "
"for network gateway: %s"), id)
return super(NsxPluginV2, self).update_network_gateway(
context, id, network_gateway)
def connect_network(self, context, network_gateway_id,
network_mapping_info):
# Ensure the default gateway in the config file is in sync with the db
self._ensure_default_network_gateway()
try:
return super(NsxPluginV2, self).connect_network(
context, network_gateway_id, network_mapping_info)
except api_exc.Conflict:
raise nsx_exc.L2GatewayAlreadyInUse(gateway=network_gateway_id)
def disconnect_network(self, context, network_gateway_id,
network_mapping_info):
# Ensure the default gateway in the config file is in sync with the db
self._ensure_default_network_gateway()
return super(NsxPluginV2, self).disconnect_network(
context, network_gateway_id, network_mapping_info)
def _get_nsx_device_id(self, context, device_id):
return self._get_gateway_device(context, device_id)['nsx_id']
def _rollback_gw_device(self, context, device_id, gw_data=None,
new_status=None, is_create=False):
LOG.error(_LE("Rolling back database changes for gateway device %s "
"because of an error in the NSX backend"), device_id)
with context.session.begin(subtransactions=True):
query = self._model_query(
context, networkgw_db.NetworkGatewayDevice).filter(
networkgw_db.NetworkGatewayDevice.id == device_id)
if is_create:
query.delete(synchronize_session=False)
else:
super(NsxPluginV2, self).update_gateway_device(
context, device_id,
{networkgw.DEVICE_RESOURCE_NAME: gw_data})
if new_status:
query.update({'status': new_status},
synchronize_session=False)
# TODO(salv-orlando): Handlers for Gateway device operations should be
# moved into the appropriate nsx_handlers package once the code for the
# blueprint nsx-async-backend-communication merges
def create_gateway_device_handler(self, context, gateway_device,
client_certificate):
neutron_id = gateway_device['id']
try:
nsx_res = l2gwlib.create_gateway_device(
self.cluster,
gateway_device['tenant_id'],
gateway_device['name'],
neutron_id,
self.cluster.default_tz_uuid,
gateway_device['connector_type'],
gateway_device['connector_ip'],
client_certificate)
# Fetch status (it needs another NSX API call)
device_status = nsx_utils.get_nsx_device_status(self.cluster,
nsx_res['uuid'])
# set NSX GW device in neutron database and update status
with context.session.begin(subtransactions=True):
query = self._model_query(
context, networkgw_db.NetworkGatewayDevice).filter(
networkgw_db.NetworkGatewayDevice.id == neutron_id)
query.update({'status': device_status,
'nsx_id': nsx_res['uuid']},
synchronize_session=False)
LOG.debug("Neutron gateway device: %(neutron_id)s; "
"NSX transport node identifier: %(nsx_id)s; "
"Operational status: %(status)s.",
{'neutron_id': neutron_id,
'nsx_id': nsx_res['uuid'],
'status': device_status})
return device_status
except (nsx_exc.InvalidSecurityCertificate, api_exc.NsxApiException):
with excutils.save_and_reraise_exception():
self._rollback_gw_device(context, neutron_id, is_create=True)
def update_gateway_device_handler(self, context, gateway_device,
old_gateway_device_data,
client_certificate):
nsx_id = gateway_device['nsx_id']
neutron_id = gateway_device['id']
try:
l2gwlib.update_gateway_device(
self.cluster,
nsx_id,
gateway_device['tenant_id'],
gateway_device['name'],
neutron_id,
self.cluster.default_tz_uuid,
gateway_device['connector_type'],
gateway_device['connector_ip'],
client_certificate)
# Fetch status (it needs another NSX API call)
device_status = nsx_utils.get_nsx_device_status(self.cluster,
nsx_id)
# update status
with context.session.begin(subtransactions=True):
query = self._model_query(
context, networkgw_db.NetworkGatewayDevice).filter(
networkgw_db.NetworkGatewayDevice.id == neutron_id)
query.update({'status': device_status},
synchronize_session=False)
LOG.debug("Neutron gateway device: %(neutron_id)s; "
"NSX transport node identifier: %(nsx_id)s; "
"Operational status: %(status)s.",
{'neutron_id': neutron_id,
'nsx_id': nsx_id,
'status': device_status})
return device_status
except (nsx_exc.InvalidSecurityCertificate, api_exc.NsxApiException):
with excutils.save_and_reraise_exception():
self._rollback_gw_device(context, neutron_id,
gw_data=old_gateway_device_data)
except n_exc.NotFound:
# The gateway device was probably deleted in the backend.
# The DB change should be rolled back and the status must
# be put in error
with excutils.save_and_reraise_exception():
self._rollback_gw_device(context, neutron_id,
gw_data=old_gateway_device_data,
new_status=networkgw_db.ERROR)
def get_gateway_device(self, context, device_id, fields=None):
# Get device from database
gw_device = super(NsxPluginV2, self).get_gateway_device(
context, device_id, fields, include_nsx_id=True)
# Fetch status from NSX
nsx_id = gw_device['nsx_id']
device_status = nsx_utils.get_nsx_device_status(self.cluster, nsx_id)
# TODO(salv-orlando): Asynchronous sync for gateway device status
# Update status in database
with context.session.begin(subtransactions=True):
query = self._model_query(
context, networkgw_db.NetworkGatewayDevice).filter(
networkgw_db.NetworkGatewayDevice.id == device_id)
query.update({'status': device_status},
synchronize_session=False)
gw_device['status'] = device_status
return gw_device
def get_gateway_devices(self, context, filters=None, fields=None,
sorts=None, limit=None, marker=None,
page_reverse=False):
# Get devices from database
devices = super(NsxPluginV2, self).get_gateway_devices(
context, filters, fields, include_nsx_id=True)
# Fetch operational status from NSX, filter by tenant tag
# TODO(salv-orlando): Asynchronous sync for gateway device status
tenant_id = context.tenant_id if not context.is_admin else None
nsx_statuses = nsx_utils.get_nsx_device_statuses(self.cluster,
tenant_id)
# Update statuses in database
with context.session.begin(subtransactions=True):
for device in devices:
new_status = nsx_statuses.get(device['nsx_id'])
if new_status:
device['status'] = new_status
return devices
def create_gateway_device(self, context, gateway_device):
# NOTE(salv-orlando): client-certificate will not be stored
# in the database
device_data = gateway_device[networkgw.DEVICE_RESOURCE_NAME]
client_certificate = device_data.pop('client_certificate')
gw_device = super(NsxPluginV2, self).create_gateway_device(
context, gateway_device)
# DB operation was successful, perform NSX operation
gw_device['status'] = self.create_gateway_device_handler(
context, gw_device, client_certificate)
return gw_device
def update_gateway_device(self, context, device_id,
gateway_device):
# NOTE(salv-orlando): client-certificate will not be stored
# in the database
client_certificate = (
gateway_device[networkgw.DEVICE_RESOURCE_NAME].pop(
'client_certificate', None))
        # Retrieve current state from DB in case a rollback should be needed
old_gw_device_data = super(NsxPluginV2, self).get_gateway_device(
context, device_id, include_nsx_id=True)
gw_device = super(NsxPluginV2, self).update_gateway_device(
context, device_id, gateway_device, include_nsx_id=True)
# DB operation was successful, perform NSX operation
gw_device['status'] = self.update_gateway_device_handler(
context, gw_device, old_gw_device_data, client_certificate)
gw_device.pop('nsx_id')
return gw_device
def delete_gateway_device(self, context, device_id):
nsx_device_id = self._get_nsx_device_id(context, device_id)
super(NsxPluginV2, self).delete_gateway_device(
context, device_id)
# DB operation was successful, perform NSX operation
# TODO(salv-orlando): State consistency with neutron DB
# should be ensured even in case of backend failures
try:
l2gwlib.delete_gateway_device(self.cluster, nsx_device_id)
except n_exc.NotFound:
LOG.warn(_LW("Removal of gateway device: %(neutron_id)s failed on "
"NSX backend (NSX id:%(nsx_id)s) because the NSX "
"resource was not found"),
{'neutron_id': device_id, 'nsx_id': nsx_device_id})
except api_exc.NsxApiException:
with excutils.save_and_reraise_exception():
# In this case a 500 should be returned
LOG.exception(_LE("Removal of gateway device: %(neutron_id)s "
"failed on NSX backend (NSX id:%(nsx_id)s). "
"Neutron and NSX states have diverged."),
{'neutron_id': device_id,
'nsx_id': nsx_device_id})
def create_security_group(self, context, security_group, default_sg=False):
"""Create security group.
If default_sg is true that means we are creating a default security
group and we don't need to check if one exists.
"""
s = security_group.get('security_group')
tenant_id = self._get_tenant_id_for_create(context, s)
if not default_sg:
self._ensure_default_security_group(context, tenant_id)
# NOTE(salv-orlando): Pre-generating Neutron ID for security group.
neutron_id = str(uuid.uuid4())
nsx_secgroup = secgrouplib.create_security_profile(
self.cluster, tenant_id, neutron_id, s)
with context.session.begin(subtransactions=True):
s['id'] = neutron_id
sec_group = super(NsxPluginV2, self).create_security_group(
context, security_group, default_sg)
context.session.flush()
# Add mapping between neutron and nsx identifiers
nsx_db.add_neutron_nsx_security_group_mapping(
context.session, neutron_id, nsx_secgroup['uuid'])
return sec_group
def update_security_group(self, context, secgroup_id, security_group):
secgroup = (super(NsxPluginV2, self).
update_security_group(context,
secgroup_id,
security_group))
if ('name' in security_group['security_group'] and
secgroup['name'] != 'default'):
nsx_sec_profile_id = nsx_utils.get_nsx_security_group_id(
context.session, self.cluster, secgroup_id)
try:
name = security_group['security_group']['name']
secgrouplib.update_security_profile(
self.cluster, nsx_sec_profile_id, name)
except (n_exc.NotFound, api_exc.NsxApiException) as e:
# Reverting the DB change is not really worthwhile
# for a mismatch between names. It's the rules that
# we care about.
LOG.error(_LE('Error while updating security profile '
'%(uuid)s with name %(name)s: %(error)s.'),
{'uuid': secgroup_id, 'name': name, 'error': e})
return secgroup
def delete_security_group(self, context, security_group_id):
"""Delete a security group.
        :param security_group_id: security group to remove.
"""
with context.session.begin(subtransactions=True):
security_group = super(NsxPluginV2, self).get_security_group(
context, security_group_id)
if not security_group:
raise ext_sg.SecurityGroupNotFound(id=security_group_id)
if security_group['name'] == 'default' and not context.is_admin:
raise ext_sg.SecurityGroupCannotRemoveDefault()
filters = {'security_group_id': [security_group['id']]}
if super(NsxPluginV2, self)._get_port_security_group_bindings(
context, filters):
raise ext_sg.SecurityGroupInUse(id=security_group['id'])
nsx_sec_profile_id = nsx_utils.get_nsx_security_group_id(
context.session, self.cluster, security_group_id)
try:
secgrouplib.delete_security_profile(
self.cluster, nsx_sec_profile_id)
except n_exc.NotFound:
# The security profile was not found on the backend
# do not fail in this case.
LOG.warning(_LW("The NSX security profile %(sec_profile_id)s, "
"associated with the Neutron security group "
"%(sec_group_id)s was not found on the "
"backend"),
{'sec_profile_id': nsx_sec_profile_id,
'sec_group_id': security_group_id})
except api_exc.NsxApiException:
# Raise and fail the operation, as there is a problem which
# prevented the sec group from being removed from the backend
LOG.exception(_LE("An exception occurred while removing the "
"NSX security profile %(sec_profile_id)s, "
"associated with Netron security group "
"%(sec_group_id)s"),
{'sec_profile_id': nsx_sec_profile_id,
'sec_group_id': security_group_id})
raise nsx_exc.NsxPluginException(
_("Unable to remove security group %s from backend"),
security_group['id'])
return super(NsxPluginV2, self).delete_security_group(
context, security_group_id)
def _validate_security_group_rules(self, context, rules):
for rule in rules['security_group_rules']:
r = rule.get('security_group_rule')
port_based_proto = (self._get_ip_proto_number(r['protocol'])
in securitygroups_db.IP_PROTOCOL_MAP.values())
if (not port_based_proto and
(r['port_range_min'] is not None or
r['port_range_max'] is not None)):
msg = (_("Port values not valid for "
"protocol: %s") % r['protocol'])
raise n_exc.BadRequest(resource='security_group_rule',
msg=msg)
return super(NsxPluginV2, self)._validate_security_group_rules(context,
rules)
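    # Validation example (hypothetical rule): a rule using a protocol that is
    # not listed in securitygroups_db.IP_PROTOCOL_MAP, e.g.
    #   {'protocol': '47', 'port_range_min': 10, 'port_range_max': 20, ...}
    # is rejected with BadRequest by the check above, since port values are
    # only accepted for the port-based protocols known to that map.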
def create_security_group_rule(self, context, security_group_rule):
"""Create a single security group rule."""
bulk_rule = {'security_group_rules': [security_group_rule]}
return self.create_security_group_rule_bulk(context, bulk_rule)[0]
def create_security_group_rule_bulk(self, context, security_group_rule):
"""Create security group rules.
:param security_group_rule: list of rules to create
"""
s = security_group_rule.get('security_group_rules')
# TODO(arosen) is there anyway we could avoid having the update of
# the security group rules in nsx outside of this transaction?
with context.session.begin(subtransactions=True):
security_group_id = self._validate_security_group_rules(
context, security_group_rule)
# Check to make sure security group exists
security_group = super(NsxPluginV2, self).get_security_group(
context, security_group_id)
if not security_group:
raise ext_sg.SecurityGroupNotFound(id=security_group_id)
# Check for duplicate rules
self._check_for_duplicate_rules(context, s)
# gather all the existing security group rules since we need all
# of them to PUT to NSX.
existing_rules = self.get_security_group_rules(
context, {'security_group_id': [security_group['id']]})
combined_rules = sg_utils.merge_security_group_rules_with_current(
context.session, self.cluster, s, existing_rules)
nsx_sec_profile_id = nsx_utils.get_nsx_security_group_id(
context.session, self.cluster, security_group_id)
secgrouplib.update_security_group_rules(self.cluster,
nsx_sec_profile_id,
combined_rules)
return super(
NsxPluginV2, self).create_security_group_rule_bulk_native(
context, security_group_rule)
def delete_security_group_rule(self, context, sgrid):
"""Delete a security group rule
        :param sgrid: security group rule id to remove.
"""
with context.session.begin(subtransactions=True):
# determine security profile id
security_group_rule = (
super(NsxPluginV2, self).get_security_group_rule(
context, sgrid))
if not security_group_rule:
raise ext_sg.SecurityGroupRuleNotFound(id=sgrid)
sgid = security_group_rule['security_group_id']
current_rules = self.get_security_group_rules(
context, {'security_group_id': [sgid]})
current_rules_nsx = sg_utils.get_security_group_rules_nsx_format(
context.session, self.cluster, current_rules, True)
sg_utils.remove_security_group_with_id_and_id_field(
current_rules_nsx, sgrid)
nsx_sec_profile_id = nsx_utils.get_nsx_security_group_id(
context.session, self.cluster, sgid)
secgrouplib.update_security_group_rules(
self.cluster, nsx_sec_profile_id, current_rules_nsx)
return super(NsxPluginV2, self).delete_security_group_rule(context,
sgrid)
def create_qos_queue(self, context, qos_queue, check_policy=True):
q = qos_queue.get('qos_queue')
self._validate_qos_queue(context, q)
q['id'] = queuelib.create_lqueue(self.cluster, q)
return super(NsxPluginV2, self).create_qos_queue(context, qos_queue)
def delete_qos_queue(self, context, queue_id, raise_in_use=True):
filters = {'queue_id': [queue_id]}
queues = self._get_port_queue_bindings(context, filters)
if queues:
if raise_in_use:
raise qos.QueueInUseByPort()
else:
return
queuelib.delete_lqueue(self.cluster, queue_id)
return super(NsxPluginV2, self).delete_qos_queue(context, queue_id)
| apache-2.0 | -6,912,307,957,773,584,000 | 50.663462 | 79 | 0.549119 | false | 4.325216 | false | false | false |
kungpfui/pyCueSheet | src/cuejoin.py | 1 | 9187 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# $Id$
import os, sys, re
import time, math
import wave
import subprocess
import multiprocessing
class Decode:
""" Decoder """
def __init__(self, filename):
self.filename = filename
self.origin_filename = None
self.fileext = {
'.tta' : self.tta,
'.flac' : self.flac,
'.ape' : self.ape,
'.wv' : self.wv,
}
if not os.path.exists(filename):
# try to find an encoded file and decode it to wave format
for extension, dec_func in self.fileext.iteritems():
filename = os.path.splitext(filename)[0] + extension
if os.path.exists(filename):
print 'Decode:', filename
self.origin_filename = filename
dec_func()
break
def __del__(self):
if self.origin_filename:
os.remove(self.filename)
def ape(self):
subprocess.call(['mac',
self.origin_filename,
self.filename,
'-d',
] )
def flac(self):
subprocess.call(['flac',
'-d',
self.origin_filename,
] )
def tta(self):
subprocess.call(['ttaenc',
'-d',
self.origin_filename,
'.',
] )
def wv(self):
subprocess.call(['wvunpack',
self.origin_filename,
'.',
] )
class Track:
def __init__(self, track_index, file, parent):
# from parent
for member in ('cd_performer', 'cd_title', 'cd_date', 'cd_genre'):
setattr(self, member, getattr(parent, member))
self.file = file
self.title = ''
self.index = track_index
self.performer = self.cd_performer
self.time = { 1:0.0 }
def __str__(self):
return "{} - {} - {}".format(self.index, self.title, self.time)
class CueSheet:
def __init__(self, cue_sheet):
self.sheet = cue_sheet
self.cd_performer = ''
self.cd_title = ''
self.cd_genre = ''
self.cd_date = ''
self.current_file = ''
self.tracks = []
self.regex_lst = (
(re.compile(r'PERFORMER\s(.+)'), self.__performer),
(re.compile(r'REM DATE\s(.+)'), self.__date),
(re.compile(r'REM GENRE\s(.+)'), self.__genre),
(re.compile(r'TITLE\s(.+)'), self.__title),
(re.compile(r'FILE\s(.+)\sWAVE'), self.__file), # only wave
(re.compile(r'TRACK\s(\d{2})\sAUDIO'), self.__track), # only audio
(re.compile(r'INDEX\s(\d{2})\s(\d{1,3}:\d{2}:\d{2})'), self.__index),
)
def __performer(self, s):
if not self.tracks:
self.cd_performer = s
else:
self.tracks[-1].performer = s
def __title(self, s):
if not self.tracks:
self.cd_title = s
else:
self.tracks[-1].title = s
def __genre(self, s):
self.cd_genre = s
def __date(self, s):
self.cd_date = s
def __file(self, s):
self.current_file = s
def __track(self, s):
self.tracks.append( Track(s, self.current_file, self) )
@staticmethod
def index_split(s):
t = s.split(':')
return (int(t[0])*60 + int(t[1]))*75 + int(t[2])
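    # index_split() converts a cue-sheet INDEX time "MM:SS:FF" into CD frames (75 frames per
    # second); e.g. index_split('03:21:45') -> (3*60 + 21)*75 + 45 = 15120 (illustrative value).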
@staticmethod
def dqstrip(s):
if s[0] == '"' and s[-1] == '"': return s[1:-1]
return s
@staticmethod
def unquote(t):
return tuple([CueSheet.dqstrip(s.strip()) for s in t])
def __index(self, idx, s):
idx = int(idx)
self.tracks[-1].time[idx] = self.index_split(s)
def read(self):
for line in open(self.sheet):
for regex, handler in self.regex_lst:
mobj = regex.match(line.strip())
if mobj:
#~ print mobj.group(1)
handler(*self.unquote(mobj.groups()))
#~ for x in self.tracks: print x
def split(self, encoders=None):
encoding_queue = multiprocessing.Queue(multiprocessing.cpu_count())
keep_alive = [] # a dummy object
for i, track in enumerate(self.tracks):
keep_alive.append( Decode(track.file) )
wafi = wave.open(track.file, 'rb')
param_names = ('nchannels', 'sampwidth', 'framerate', 'nframes', 'comptype', 'compname')
params = wafi.getparams()
param_dict = dict(zip(param_names, params))
#~ print param_dict['framerate']
# calculate number of frames
start = param_dict['framerate'] * track.time[1] // 75
stop = param_dict['nframes']
if len(self.tracks) > i+1 and self.tracks[i+1].file == track.file:
stop = int(param_dict['framerate'] * self.tracks[i+1].time.get(0, self.tracks[i+1].time[1])) // 75
trackfilename = ' - '.join((track.index, track.title)) + '.wav'
trackfilename = trackfilename.replace('?', '')
trackfilename = trackfilename.replace('/', '')
trackfilename = trackfilename.replace('\\', '')
trackfilename = trackfilename.replace(':', '')
if not os.path.exists(trackfilename):
wafi_write = wave.open(trackfilename, 'wb')
newparams = list(params)
newparams[3] = 0
wafi_write.setparams( tuple(newparams) )
wafi.setpos(start)
wafi_write.writeframes(wafi.readframes(stop-start))
wafi_write.close()
wafi.close()
# ogg encode it, queue is used for sync
for encode_to in encoders:
encoding_queue.put(trackfilename)
p = multiprocessing.Process(target=encode_to, args=(
encoding_queue,
trackfilename,
track
))
p.start()
# wait until all task are finished
while not encoding_queue.empty():
time.sleep(1.0)
keep_alive = None
def __str__(self):
output = 'REM COMMENT CUE JOIN\n'
if self.cd_genre: output += 'REM GENRE "{}"\n'.format(self.cd_genre)
if self.cd_date: output += 'REM DATE {}\n'.format(self.cd_date)
if self.cd_performer: output += 'PERFORMER "{}"\n'.format(self.cd_performer)
if self.cd_title: output += 'TITLE "{}"\n'.format(self.cd_title)
one_file = self.tracks[0].file == self.tracks[-1].file
if one_file: output += u'FILE "{}" WAVE\n'.format(self.current_file).encode('latin-1')
for i, track in enumerate(self.tracks):
output += ' TRACK {:02d} AUDIO\n'.format(i+1)
output += ' TITLE "{}"\n'.format(track.title)
if self.cd_performer != track.performer: output += ' PERFORMER "{}"\n'.format(track.performer)
if not one_file:
output += ' FILE "{}" WAVE\n'.format(track.file)
for idx in sorted(track.time.keys()):
t = track.time[idx]
#~ print t
mins = t // (60*75)
t -= mins * (60*75)
sec = t // 75
t -= sec * 75
rest = t
output += ' INDEX {:02d} {:02d}:{:02d}:{:02d}\n'.format(idx, int(mins), int(sec), rest)
return output
def __analyze_wave(self, trackfile):
wafi = wave.open(trackfile, 'rb')
param_names = ('nchannels', 'sampwidth', 'framerate', 'nframes', 'comptype', 'compname')
params = wafi.getparams()
param_dict = dict(zip(param_names, params))
wafi.close()
return param_dict, params
def join(self, cue_obj, wave_filename=u'join'):
self.current_file = wave_filename + u'.wav'
wafo = wave.open(self.current_file, 'wb')
set_params = True
for i, track in enumerate(self.tracks):
Decode(track.file)
if set_params:
set_params = False
pdict, param = self.__analyze_wave(track.file)
#~ print pdict['nframes'] / (pdict['framerate'] // 75)
wafo.setparams(param)
wafi = wave.open(track.file, 'rb')
pdict, param = self.__analyze_wave(track.file)
# calculate number of frames
start = pdict['framerate'] * track.time.get(0, track.time[1]) // 75
stop = pdict['nframes']
if len(self.tracks) > i+1 and self.tracks[i+1].file == track.file:
stop = pdict['framerate'] * self.tracks[i+1].time.get(0, self.tracks[i+1].time[1]) // 75
print start, stop, pdict['nframes']
wafi.setpos(start)
wafo.writeframes(wafi.readframes(stop-start))
wafi.close()
track.file = self.current_file
# second part
time_offset = pdict['nframes']*75 // pdict['framerate']
for i, track in enumerate(cue_obj.tracks):
Decode(track.file)
wafi = wave.open(track.file, 'rb')
pdict, param = self.__analyze_wave(track.file)
# calculate number of frames
start = pdict['framerate'] * track.time.get(0, track.time[1]) // 75
stop = pdict['nframes']
if len(cue_obj.tracks) > i+1 and cue_obj.tracks[i+1].file == track.file:
stop = pdict['framerate'] * cue_obj.tracks[i+1].time.get(0, cue_obj.tracks[i+1].time[1]) // 75
print start, stop, pdict['nframes']
wafi.setpos(start)
wafo.writeframes(wafi.readframes(stop-start))
wafi.close()
track.file = self.current_file
for key, value in cue_obj.tracks[i].time.iteritems():
cue_obj.tracks[i].time[key] = value + time_offset
self.tracks += cue_obj.tracks
with open(wave_filename+u'.cue', 'w') as f:
f.write( str(self) )
if __name__ == "__main__":
cue_files = []
for filename in os.listdir(u'.'):
if os.path.isfile(filename) and filename.lower().endswith(u'.cue'):
cue_files.append(filename)
cue_objs = []
joined_filename = None
for f in sorted(cue_files):
if not joined_filename: joined_filename = f
else:
for i, c in enumerate(f):
if joined_filename[i] != c:
joined_filename = joined_filename[:i]
break
cue = CueSheet(f)
cue.read()
cue_objs.append(cue)
joined_filename = joined_filename.rstrip(u'CD').rstrip()
#~ print joined_filename
x = cue_objs[0].join(cue_objs[1], joined_filename)
| gpl-3.0 | 6,097,720,098,156,660,000 | 25.020588 | 102 | 0.602917 | false | 2.847799 | false | false | false |
jrydberg/guild | examples/ring.py | 1 | 1777 | # Copyright (c) 2012, Johan Rydberg
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation files
# (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
"""Example that builds a ring of actors and then send a message
through the ring.
"""
from guild import actor
import gevent
def forward(receive, address):
pat, data = receive()
address | data
def build(receive, n):
ring = []
for i in range(n):
if not ring:
node = actor.spawn(forward, actor.curaddr())
else:
node = actor.spawn(forward, ring[-1])
ring.append(node)
gevent.sleep()
ring[-1] | {'text': 'hello around the ring'}
pat, data = receive()
return data
mesh = actor.Mesh()
node = actor.Node(mesh, 'cookie@localhost.local:3232')
addr = node.spawn(build, 10000)
print node.wait(addr)
| mit | -1,933,545,526,922,268,200 | 31.907407 | 70 | 0.714688 | false | 4.066362 | false | false | false |
cashelcomputers/basaGC | basagc/telemachus.py | 1 | 5005 | #!/usr/bin/env python3
"""This module contains code that interacts with the Telemachus mod to access KSP telemetry"""
import json
import urllib.error
import urllib.parse
import urllib.request
from basagc import config
from basagc import utils
if config.DEBUG:
from pudb import set_trace # lint:ok
telemetry = {}
commands = {}
class TelemetryNotAvailable(Exception):
"""This exception should be raised when we do not have a list of available telemetry"""
pass
class KSPNotConnected(Exception):
""" This exception should be raised when there is no connection to KSP """
pass
def check_connection():
""" Checks if there is a connection available to Telemachus
Returns True if so, False otherwise
"""
try:
urllib.request.urlopen(config.URL + "paused=p.paused")
except urllib.error.URLError:
return False
else:
return True
def get_api_listing():
""" Gets the list of API calls provided by Telemachus
:rtype: dict
"""
global telemetry
global commands
try:
response = urllib.request.urlopen(config.URL + "api=a.api")
except urllib.error.URLError:
raise KSPNotConnected
response_string = response.read().decode('utf-8')
data = json.loads(response_string)
for a in data.values():
for b in a:
if b["apistring"].startswith("b."):
name = "body_" + b["apistring"].rsplit(".", 1)[1]
elif b["apistring"].startswith("tar."):
name = "target_" + b["apistring"].rsplit(".", 1)[1]
elif b["apistring"].startswith("f.") or b["apistring"].startswith("mj.") or \
b["apistring"].startswith("v.set"):
command = b["apistring"].rsplit(".", 1)[1]
commands[command] = b["apistring"]
continue
else:
name = b["apistring"].rsplit(".", 1)[1]
telemetry[name] = b["apistring"]
def get_telemetry(data, body_number=None):
""" Contacts telemachus for the requested data.
:param data: The API call required
    :type data: str
:param body_number: Specify which body to obtain data for
:type body_number: string
:rtype: string
"""
# if telemetry is None:
# raise TelemetryNotAvailable
try:
query_string = data + "=" + telemetry[data]
except KeyError:
raise KSPNotConnected
return
if body_number:
query_string += "[{}]".format(body_number)
try:
raw_response = urllib.request.urlopen(config.URL + query_string)
except urllib.error.URLError:
utils.log("Query string: {}".format(query_string), log_level="ERROR")
utils.log("Caught exception urllib2.URLERROR", log_level="ERROR")
raise KSPNotConnected
response_string = raw_response.read().decode("utf-8)")
json_response = json.loads(response_string)
return json_response[data]
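# Illustrative use only (assumes a running KSP instance with the Telemachus mod, and that
# 'altitude' and 'body_name' entries exist in the listing fetched by get_api_listing()):
#   get_api_listing()
#   altitude = get_telemetry('altitude')
#   body_name = get_telemetry('body_name', body_number=1)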
# def enable_smartass():
# query_string = "command="
def set_mechjeb_smartass(direction):
command_string = "command=" + commands[direction]
send_command_to_ksp(command_string)
def disable_smartass():
command_string = "command=" + commands["smartassoff"]
send_command_to_ksp(command_string)
def set_throttle(throttle_percent):
if throttle_percent == 0:
throttle_magnitude = 0
else:
throttle_magnitude = throttle_percent / 100.0
command_string = "command=" + commands["setThrottle"] + "[" + str(throttle_magnitude) + "]"
send_command_to_ksp(command_string)
def cut_throttle():
command_string = "command=" + commands["throttleZero"]
send_command_to_ksp(command_string)
def send_command_to_ksp(command_string):
try:
urllib.request.urlopen(config.URL + command_string)
except urllib.error.URLError:
utils.log("Query string: {}".format(command_string), log_level="ERROR")
utils.log("Caught exception urllib2.URLERROR", log_level="ERROR")
raise KSPNotConnected
def print_all_telemetry():
print("Telemetry available:")
for item in sorted(telemetry):
print("- " + item)
print()
print("Commands available:")
for item in sorted(commands):
print("- " + item)
def add_maneuver_node(ut, delta_v):
ut = str(round(ut, 2))
delta_v_x = str(round(delta_v[0], 2))
delta_v_y = str(round(delta_v[1], 2))
delta_v_z = str(round(delta_v[2], 2))
command_string = "command=" + telemetry["addManeuverNode"] + "[" + str(ut) + "," + delta_v_x + "," + delta_v_y + "," + delta_v_z + "]"
send_command_to_ksp(command_string)
def update_maneuver_node(ut, delta_v):
ut = str(round(ut, 2))
delta_v_x = str(round(delta_v[0], 2))
delta_v_y = str(round(delta_v[1], 2))
delta_v_z = str(round(delta_v[2], 2))
command_string = "command=" + telemetry["updateManeuverNode"] + "[0," + str(ut) + "," + delta_v_x + "," + delta_v_y + "," + delta_v_z + "]"
    send_command_to_ksp(command_string)
| gpl-2.0 | 382,405,559,617,665,600 | 30.292683 | 145 | 0.620979 | false | 3.580114 | true | false | false |
cryptickp/heat | heat/common/timeutils.py | 4 | 2566 | #
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Utilities for handling ISO 8601 duration format.
"""
import random
import re
import time
from heat.common.i18n import _
iso_duration_re = re.compile(r'PT(?:(\d+)H)?(?:(\d+)M)?(?:(\d+)S)?$')
wallclock = time.time
class Duration(object):
'''
Note that we don't attempt to handle leap seconds or large clock
jumps here. The latter are assumed to be rare and the former
negligible in the context of the timeout. Time zone adjustments,
Daylight Savings and the like *are* handled. PEP 418 adds a proper
monotonic clock, but only in Python 3.3.
'''
def __init__(self, timeout=0):
self._endtime = wallclock() + timeout
def expired(self):
return wallclock() > self._endtime
def endtime(self):
return self._endtime
def parse_isoduration(duration):
"""
Convert duration in ISO 8601 format to second(s).
Year, Month, Week, and Day designators are not supported.
Example: 'PT12H30M5S'
"""
result = iso_duration_re.match(duration)
if not result:
raise ValueError(_('Only ISO 8601 duration format of the form '
'PT#H#M#S is supported.'))
t = 0
t += (3600 * int(result.group(1))) if result.group(1) else 0
t += (60 * int(result.group(2))) if result.group(2) else 0
t += int(result.group(3)) if result.group(3) else 0
return t
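# Worked example: parse_isoduration('PT12H30M5S') == 12*3600 + 30*60 + 5 == 45005 seconds.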
def retry_backoff_delay(attempt, scale_factor=1.0, jitter_max=0.0):
"""
Calculate an exponential backoff delay with jitter.
Delay is calculated as
    (2^attempt * scale_factor) + (uniform random from [0,1) * jitter_max)
:param attempt: The count of the current retry attempt
:param scale_factor: Multiplier to scale the exponential delay by
:param jitter_max: Maximum of random seconds to add to the delay
:returns: Seconds since epoch to wait until
"""
exp = float(2 ** attempt) * float(scale_factor)
if jitter_max == 0.0:
return exp
return exp + random.random() * jitter_max
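# Worked examples (values follow directly from the formula above):
#   retry_backoff_delay(3) == 8.0 (no jitter)
#   retry_backoff_delay(3, scale_factor=0.5, jitter_max=2.0) falls in the range [4.0, 6.0)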
| apache-2.0 | -719,043,924,277,491,600 | 30.292683 | 78 | 0.664069 | false | 3.697406 | false | false | false |
tgl-dogg/BCC-2s14-PI4-SteampunkSpider | src/steampunk_spider/database/correlacaoGenero.py | 1 | 2096 | # pegar a quantidade de horas totais em cada gênero
SELECT genre.name, SUM(hours) AS soma
FROM rel_player_software
INNER JOIN rel_software_genre ON rel_player_software.fk_software = rel_software_genre.fk_software
INNER JOIN genre ON rel_software_genre.fk_genre = genre.id_genre
GROUP BY rel_software_genre.fk_genre
ORDER BY soma DESC
LIMIT 5
# test
SELECT genre.name AS genero, SUM(rel_player_software.hours) AS horas
FROM rel_player_software
INNER JOIN player ON rel_player_software.fk_player = player.id_player
INNER JOIN software ON rel_player_software.fk_software = software.id_software
INNER JOIN rel_software_genre ON rel_software_genre.fk_software = software.id_software
INNER JOIN genre ON rel_software_genre.fk_genre = genre.id_genre
LIMIT 1
# test
SELECT player.name AS jogador, genre.name AS genre, SUM(rel_player_software.hours) AS horas
FROM rel_player_software
INNER JOIN player ON rel_player_software.fk_player = player.id_player
INNER JOIN software ON rel_player_software.fk_software = software.id_software
INNER JOIN rel_software_genre ON rel_software_genre.fk_software = software.id_software
INNER JOIN genre ON rel_software_genre.fk_genre = genre.id_genre
GROUP BY jogador
ORDER BY horas DESC
LIMIT 3
# test
(SELECT player.username AS jogador, genre.name AS genero, SUM(rel_player_software.hours) AS horas
FROM rel_player_software
INNER JOIN player ON rel_player_software.fk_player = player.id_player
INNER JOIN rel_software_genre ON rel_software_genre.fk_software = rel_player_software.fk_software
INNER JOIN genre ON rel_software_genre.fk_genre = genre.id_genre
WHERE player.id_player <= 3
GROUP BY jogador, genero
ORDER BY jogador, horas DESC)
# get the most-played genre of each player
SELECT jogador, genero, MAX(horas) FROM
(SELECT player.username AS jogador, genre.name AS genero, SUM(rel_player_software.hours) AS horas
FROM (rel_player_software
INNER JOIN player ON rel_player_software.fk_player = player.id_player
INNER JOIN rel_software_genre ON rel_software_genre.fk_software = rel_player_software.fk_software
INNER JOIN genre ON rel_software_genre.fk_genre = genre.id_genre)
GROUP BY jogador, genero
ORDER BY jogador, horas DESC) p
GROUP BY jogador
| mit | 595,603,466,192,664,800 | 42.645833 | 97 | 0.795129 | false | 3.148872 | false | true | false |
HandyGuySoftware/dupReport | dupapprise.py | 1 | 6499 | #####
#
# Module name: dupApprise.py
# Purpose: Management class for Apprise notification service
#
# Notes: Uses the Apprise push notification utility from @caronc
# https://github.com/caronc/apprise
# For any Apprise support or feature requests, please see the Apprise GitHub site
#
#####
# Import system modules
import db
import drdatetime
# Import dupReport modules
import globs
class dupApprise:
appriseConn = None
appriseOpts = None
services = None
def __init__(self):
globs.log.write(globs.SEV_NOTICE, function='Apprise', action='Init', msg='Initializing Apprise support')
import apprise
# Read name/value pairs from [apprise] section
self.appriseOpts = globs.optionManager.getRcSection('apprise')
if 'services' not in self.appriseOpts:
globs.log.write(globs.SEV_ERROR, function='Apprise', action='Init', msg='Error: No services defined for Apprise notification')
globs.closeEverythingAndExit(1) # Abort program. Can't continue
# Set defaults for missing values
self.appriseOpts['title'] = 'Apprise Notification for #SRCDEST# Backup' if 'title' not in self.appriseOpts else self.appriseOpts['title']
self.appriseOpts['body'] = 'Completed at #COMPLETETIME#: #RESULT# - #ERRMSG#' if 'body' not in self.appriseOpts else self.appriseOpts['body']
        self.appriseOpts['titletruncate'] = '0' if 'titletruncate' not in self.appriseOpts else self.appriseOpts['titletruncate']
self.appriseOpts['bodytruncate'] = '0' if 'bodytruncate' not in self.appriseOpts else self.appriseOpts['bodytruncate']
self.appriseOpts['msglevel'] = 'failure' if 'msglevel' not in self.appriseOpts else self.appriseOpts['msglevel']
# Normalize .rc values
self.appriseOpts['titletruncate'] = int(self.appriseOpts['titletruncate'])
self.appriseOpts['bodytruncate'] = int(self.appriseOpts['bodytruncate'])
self.appriseOpts['msglevel'] = self.appriseOpts['msglevel'].lower()
# Check for correct message level indicator
if self.appriseOpts['msglevel'] not in ('success', 'warning', 'failure'):
globs.log.write(globs.SEV_ERROR, function='Apprise', action='Init', msg='Error: Bad apprise message level: {}'.format(self.appriseOpts['msglevel']))
globs.closeEverythingAndExit(1) # Abort program. Can't continue.
# Initialize apprise library
result = self.appriseConn = apprise.Apprise()
globs.log.write(globs.SEV_NOTICE, function='Apprise', action='Init', msg='Initializing Apprise library. Result={}'.format(result))
# Add individual service URLs to connection
self.services = self.appriseOpts['services'].split(",")
for i in self.services:
result = self.appriseConn.add(i)
globs.log.write(globs.SEV_NOTICE, function='Apprise', action='Init', msg='Added service {}, result={}'.format(i, result))
globs.log.write(globs.SEV_NOTICE, function='Apprise', action='Init', msg='Apprise Initialization complete.')
return None
def parseMessage(self, msg, source, destination, result, message, warningmessage, errormessage, completetime):
globs.log.write(globs.SEV_NOTICE, function='Apprise', action='parseMessage', msg=msg)
newMsg = msg
newMsg = newMsg.replace('#SOURCE#',source)
newMsg = newMsg.replace('#DESTINATION#',destination)
newMsg = newMsg.replace('#SRCDEST#','{}{}{}'.format(source, globs.opts['srcdestdelimiter'], destination))
newMsg = newMsg.replace('#RESULT#',result)
newMsg = newMsg.replace('#MESSAGE#',message)
newMsg = newMsg.replace('#ERRMSG#',errormessage)
newMsg = newMsg.replace('#WARNMSG#',warningmessage)
newMsg = newMsg.replace('#COMPLETETIME#','{} {}'.format(completetime[0], completetime[1]))
globs.log.write(globs.SEV_NOTICE, function='Apprise', action='parseMessage', msg='New message=[{}]'.format(newMsg))
return newMsg
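    # For instance (illustrative values only), a body template of '#RESULT# at #COMPLETETIME#'
    # with result='Success' and completetime=('2021-01-01', '12:00') comes back as
    # 'Success at 2021-01-01 12:00'.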
def sendNotifications(self):
sqlStmt = "SELECT source, destination, parsedResult, messages, warnings, errors, timestamp FROM report ORDER BY source"
dbCursor = globs.db.execSqlStmt(sqlStmt)
reportRows = dbCursor.fetchall()
for source, destination, parsedResult, messages, warnings, errors, timestamp in reportRows:
globs.log.write(globs.SEV_NOTICE, function='Apprise', action='sendNotifications', msg='Preparing Apprise message for {}-{}, parsedResult={} msglevel={}'.format(source, destination, parsedResult, self.appriseOpts['msglevel']))
# See if we need to send a notification based on the result status
if self.appriseOpts['msglevel'] == 'warning':
if parsedResult.lower() not in ('warning', 'failure'):
globs.log.write(globs.SEV_NOTICE, function='Apprise', action='sendNotifications', msg='Msglevel mismatch at warning level - skipping')
continue
elif self.appriseOpts['msglevel'] == 'failure':
if parsedResult.lower() != 'failure':
globs.log.write(globs.SEV_NOTICE, function='Apprise', action='sendNotifications', msg='Msglevel mismatch at failure level - skipping')
continue
globs.log.write(globs.SEV_DEBUG, function='Apprise', action='sendNotifications', msg='Apprise message is sendable.')
newTitle = self.parseMessage(self.appriseOpts['title'], source, destination, parsedResult, messages, warnings, errors, drdatetime.fromTimestamp(timestamp))
newBody = self.parseMessage(self.appriseOpts['body'], source, destination, parsedResult, messages, warnings, errors, drdatetime.fromTimestamp(timestamp))
tLen = self.appriseOpts['titletruncate']
if tLen != 0:
newTitle = (newTitle[:tLen]) if len(newTitle) > tLen else newTitle
bLen = self.appriseOpts['bodytruncate']
            if bLen != 0:
newBody = (newBody[:bLen]) if len(newBody) > bLen else newBody
globs.log.write(globs.SEV_DEBUG, function='Apprise', action='sendNotifications', msg='Sending notification: Title=[{}] Body=[{}]'.format(newTitle, newBody))
result = self.appriseConn.notify(title=newTitle, body=newBody)
globs.log.write(globs.SEV_NOTICE, function='Apprise', action='sendNotifications', msg='Apprise sent. Result={}.'.format(result))
return
| mit | -8,268,305,749,878,739,000 | 55.025862 | 237 | 0.667487 | false | 3.95076 | false | false | false |
FeitengLab/EmotionMap | 1SiteRanking/RegionExtract/union.py | 1 | 2843 | #分别得到min,max和Union三个含有时间的txt
import psycopg2
# connect to my PostgreSQL database
connection = psycopg2.connect(database="postgres", user="postgres", host="127.0.0.1", port='5432', password="kwy17502X")
print(1)
# get a cursor
cursor = connection.cursor()
print(2)
try:
cursor.execute("drop table min")
cursor.execute("drop table max")
# cursor.execute("drop table uniontime")
cursor.execute("CREATE TABLE min (id serial PRIMARY KEY, userid text , min text);")
print(3)
cursor.execute("CREATE TABLE max (id serial PRIMARY KEY, userid text , max text);")
print(4)
    # import max.txt into the database
filemax = open('E:/max.txt')
linesmax = filemax.readlines()
filemax.close()
numbermax=1
for linemax in linesmax:
if(numbermax%2):
linemax = linemax.split(',')
linemax[1]=linemax[1].replace('\"','')
print(linemax[0])
            # change the table name
cursor.execute("INSERT INTO max (userid,max) VALUES (%s,%s)", (linemax[0],linemax[1]))
numbermax=numbermax+1
    # import min.txt into the database
filemin = open('E:/min.txt')
linesmin = filemin.readlines()
filemin.close()
numbermin = 1
for linemin in linesmin:
if (numbermin % 2):
linemin = linemin.split(',')
linemin[1] = linemin[1].replace('\"', '')
print(linemin[0])
            # change the table name
cursor.execute("INSERT INTO min (userid,min) VALUES (%s,%s)", (linemin[0], linemin[1]))
numbermin = numbermin + 1
    # export the union result to union.txt
cursor.execute("copy(SELECT max.userid, max.max,min.min FROM max INNER JOIN min ON max.userid=min.userid ) to 'E:/union.txt' with csv;")
    # store the result in the database (unfinished)
# cursor.execute("CREATE TABLE uniontime (id serial PRIMARY KEY , userid text , maxtime text , mintime text);")
# numberunion=0
# fileunion = open('E:/union.txt')
# linesunion = fileunion.readlines()
# fileunion.close()
# numberunion = 1
# for lineunion in linesunion:
# lineunion=lineunion.replace('\"','')
# lineunion = lineunion.replace('\n', '')
# lineunion = lineunion.split(',')
# print(lineunion)
# print(133)
# if ((numberunion % 3)==1):
    # change the table name
# numberunion = numberunion + 1
# cursor.execute("INSERT INTO uniontime (userid,maxtime) VALUES (%s,%s,%s)", (lineunion[0], str(lineunion[1])))
# if ((numberunion % 3)==2):
    #         # change the table name
# numberunion = numberunion + 1
# cursor.execute("INSERT INTO uniontime (mintime) VALUES (%s)", (str(lineunion[1]),))
# if ((numberunion % 3)==0):
# numberunion = numberunion + 1
except Exception as e:
print(repr(e))
connection.commit()
connection.close()
| mit | -1,837,974,392,402,139,100 | 32.37037 | 140 | 0.601554 | false | 3.139373 | false | false | false |
gooddata/zuul | zuul/source/gerrit.py | 1 | 13993 | # Copyright 2012 Hewlett-Packard Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import logging
import re
import time
from zuul import exceptions
from zuul.model import Change, Ref
from zuul.source import BaseSource
# Walk the change dependency tree to find a cycle
def detect_cycle(change, history=None):
if history is None:
history = []
else:
history = history[:]
history.append(change.number)
for dep in change.needs_changes:
if dep.number in history:
raise Exception("Dependency cycle detected: %s in %s" % (
dep.number, history))
detect_cycle(dep, history)
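# detect_cycle() copies the history list for every branch of the tree, so each dependency path is
# checked independently; it raises as soon as a change number reappears anywhere along the path
# currently being walked (see its use in _updateChange below).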
class GerritSource(BaseSource):
name = 'gerrit'
log = logging.getLogger("zuul.source.Gerrit")
replication_timeout = 300
replication_retry_interval = 5
depends_on_re = re.compile(r"^Depends-On: (I[0-9a-f]{40})\s*$",
re.MULTILINE | re.IGNORECASE)
def getRefSha(self, project, ref):
refs = {}
try:
refs = self.connection.getInfoRefs(project)
except:
self.log.exception("Exception looking for ref %s" %
ref)
sha = refs.get(ref, '')
return sha
def _waitForRefSha(self, project, ref, old_sha=''):
# Wait for the ref to show up in the repo
start = time.time()
while time.time() - start < self.replication_timeout:
sha = self.getRefSha(project.name, ref)
if old_sha != sha:
return True
time.sleep(self.replication_retry_interval)
return False
def isMerged(self, change, head=None):
self.log.debug("Checking if change %s is merged" % change)
if not change.number:
self.log.debug("Change has no number; considering it merged")
# Good question. It's probably ref-updated, which, ah,
# means it's merged.
return True
data = self.connection.query(change.number)
change._data = data
change.is_merged = self._isMerged(change)
if change.is_merged:
self.log.debug("Change %s is merged" % (change,))
else:
self.log.debug("Change %s is not merged" % (change,))
if not head:
return change.is_merged
if not change.is_merged:
return False
ref = 'refs/heads/' + change.branch
self.log.debug("Waiting for %s to appear in git repo" % (change))
if self._waitForRefSha(change.project, ref, change._ref_sha):
self.log.debug("Change %s is in the git repo" %
(change))
return True
self.log.debug("Change %s did not appear in the git repo" %
(change))
return False
def _isMerged(self, change):
data = change._data
if not data:
return False
status = data.get('status')
if not status:
return False
if status == 'MERGED':
return True
return False
def canMerge(self, change, allow_needs):
if not change.number:
self.log.debug("Change has no number; considering it merged")
# Good question. It's probably ref-updated, which, ah,
# means it's merged.
return True
data = change._data
if not data:
return False
if 'submitRecords' not in data:
return False
try:
for sr in data['submitRecords']:
if sr['status'] == 'OK':
return True
elif sr['status'] == 'NOT_READY':
for label in sr['labels']:
if label['status'] in ['OK', 'MAY']:
continue
elif label['status'] in ['NEED', 'REJECT']:
# It may be our own rejection, so we ignore
if label['label'].lower() not in allow_needs:
return False
continue
else:
# IMPOSSIBLE
return False
else:
# CLOSED, RULE_ERROR
return False
except:
self.log.exception("Exception determining whether change"
"%s can merge:" % change)
return False
return True
def postConfig(self):
pass
def getChange(self, event, project):
if event.change_number:
refresh = False
change = self._getChange(event.change_number, event.patch_number,
refresh=refresh)
else:
change = Ref(project)
change.connection_name = self.connection.connection_name
change.ref = event.ref
change.oldrev = event.oldrev
change.newrev = event.newrev
change.url = self._getGitwebUrl(project, sha=event.newrev)
return change
def _getChange(self, number, patchset, refresh=False, history=None):
key = '%s,%s' % (number, patchset)
change = self.connection.getCachedChange(key)
if change and not refresh:
return change
if not change:
change = Change(None)
change.connection_name = self.connection.connection_name
change.number = number
change.patchset = patchset
key = '%s,%s' % (change.number, change.patchset)
self.connection.updateChangeCache(key, change)
try:
self._updateChange(change, history)
except Exception:
self.connection.deleteCachedChange(key)
raise
return change
def getProjectOpenChanges(self, project):
# This is a best-effort function in case Gerrit is unable to return
# a particular change. It happens.
query = "project:%s status:open" % (project.name,)
self.log.debug("Running query %s to get project open changes" %
(query,))
data = self.connection.simpleQuery(query)
changes = []
for record in data:
try:
changes.append(
self._getChange(record['number'],
record['currentPatchSet']['number']))
except Exception:
self.log.exception("Unable to query change %s" %
(record.get('number'),))
return changes
def _getDependsOnFromCommit(self, message, change):
records = []
seen = set()
for match in self.depends_on_re.findall(message):
if match in seen:
self.log.debug("Ignoring duplicate Depends-On: %s" %
(match,))
continue
seen.add(match)
query = "change:%s" % (match,)
self.log.debug("Updating %s: Running query %s "
"to find needed changes" %
(change, query,))
records.extend(self.connection.simpleQuery(query))
return records
def _getNeededByFromCommit(self, change_id, change):
records = []
seen = set()
query = 'message:%s' % change_id
self.log.debug("Updating %s: Running query %s "
"to find changes needed-by" %
(change, query,))
results = self.connection.simpleQuery(query)
for result in results:
for match in self.depends_on_re.findall(
result['commitMessage']):
if match != change_id:
continue
key = (result['number'], result['currentPatchSet']['number'])
if key in seen:
continue
self.log.debug("Updating %s: Found change %s,%s "
"needs %s from commit" %
(change, key[0], key[1], change_id))
seen.add(key)
records.append(result)
return records
def _updateChange(self, change, history=None):
self.log.info("Updating %s" % (change,))
data = self.connection.query(change.number)
change._data = data
if change.patchset is None:
change.patchset = data['currentPatchSet']['number']
if 'project' not in data:
raise exceptions.ChangeNotFound(change.number, change.patchset)
change.project = self.sched.getProject(data['project'])
change.branch = data['branch']
change.url = data['url']
max_ps = 0
files = []
for ps in data['patchSets']:
if ps['number'] == change.patchset:
change.refspec = ps['ref']
for f in ps.get('files', []):
files.append(f['file'])
if int(ps['number']) > int(max_ps):
max_ps = ps['number']
if max_ps == change.patchset:
change.is_current_patchset = True
else:
change.is_current_patchset = False
change.files = files
change.is_merged = self._isMerged(change)
change.approvals = data['currentPatchSet'].get('approvals', [])
change.open = data['open']
change.status = data['status']
change.owner = data['owner']
if change.is_merged:
# This change is merged, so we don't need to look any further
# for dependencies.
self.log.debug("Updating %s: change is merged" % (change,))
return change
if history is None:
history = []
else:
history = history[:]
history.append(change.number)
needs_changes = []
if 'dependsOn' in data:
parts = data['dependsOn'][0]['ref'].split('/')
dep_num, dep_ps = parts[3], parts[4]
if dep_num in history:
raise Exception("Dependency cycle detected: %s in %s" % (
dep_num, history))
self.log.debug("Updating %s: Getting git-dependent change %s,%s" %
(change, dep_num, dep_ps))
dep = self._getChange(dep_num, dep_ps, history=history)
# Because we are not forcing a refresh in _getChange, it
# may return without executing this code, so if we are
# updating our change to add ourselves to a dependency
# cycle, we won't detect it. By explicitly performing a
# walk of the dependency tree, we will.
detect_cycle(dep, history)
if (not dep.is_merged) and dep not in needs_changes:
needs_changes.append(dep)
for record in self._getDependsOnFromCommit(data['commitMessage'],
change):
dep_num = record['number']
dep_ps = record['currentPatchSet']['number']
if dep_num in history:
raise Exception("Dependency cycle detected: %s in %s" % (
dep_num, history))
self.log.debug("Updating %s: Getting commit-dependent "
"change %s,%s" %
(change, dep_num, dep_ps))
dep = self._getChange(dep_num, dep_ps, history=history)
# Because we are not forcing a refresh in _getChange, it
# may return without executing this code, so if we are
# updating our change to add ourselves to a dependency
# cycle, we won't detect it. By explicitly performing a
# walk of the dependency tree, we will.
detect_cycle(dep, history)
if (not dep.is_merged) and dep not in needs_changes:
needs_changes.append(dep)
change.needs_changes = needs_changes
needed_by_changes = []
if 'neededBy' in data:
for needed in data['neededBy']:
parts = needed['ref'].split('/')
dep_num, dep_ps = parts[3], parts[4]
self.log.debug("Updating %s: Getting git-needed change %s,%s" %
(change, dep_num, dep_ps))
dep = self._getChange(dep_num, dep_ps)
if (not dep.is_merged) and dep.is_current_patchset:
needed_by_changes.append(dep)
for record in self._getNeededByFromCommit(data['id'], change):
dep_num = record['number']
dep_ps = record['currentPatchSet']['number']
self.log.debug("Updating %s: Getting commit-needed change %s,%s" %
(change, dep_num, dep_ps))
# Because a commit needed-by may be a cross-repo
# dependency, cause that change to refresh so that it will
# reference the latest patchset of its Depends-On (this
# change).
dep = self._getChange(dep_num, dep_ps, refresh=True)
if (not dep.is_merged) and dep.is_current_patchset:
needed_by_changes.append(dep)
change.needed_by_changes = needed_by_changes
return change
def getGitUrl(self, project):
return self.connection.getGitUrl(project)
def _getGitwebUrl(self, project, sha=None):
return self.connection.getGitwebUrl(project, sha)
| apache-2.0 | 2,095,542,611,283,106,300 | 38.640227 | 79 | 0.538841 | false | 4.405856 | false | false | false |
edsonlb/PoloVota | projects/models.py | 1 | 2825 | # This Python file uses the following encoding: utf-8
from django.db import models
from django.utils.translation import ugettext_lazy as _
class Project(models.Model):
area = models.CharField(_('área'), max_length=100, blank=False, null=False)
tema = models.CharField(_('tema'), max_length=200, blank=False, null=False)
descricao = models.CharField(_('descrição'), max_length=400, blank=False, null=False)
universidade = models.CharField(_('universidade'), max_length=200, null=False)
universidadeOrientador = models.CharField(_('orientador'), max_length=200, blank=True, null=True)
liderNome = models.CharField(_('líder'), max_length=200, null=False, blank=False)
liderTelefone = models.CharField(_('telefone'), max_length=20, blank=False, null=False)
liderEmail = models.EmailField(_('email'), max_length=100, null=False, blank=False)
liderSocial = models.CharField(_('rede social'), max_length=200, blank=True)
liderIntegrantes = models.CharField(_('integrantes'), max_length=400, blank=True, null=True)
link_slides = models.CharField(_('slides'), max_length=300, blank=True)
link_monografia = models.CharField(_('monografia'), max_length=300, blank=True)
link_modelagem = models.CharField(_('modelagem'), max_length=300, blank=True)
link_website = models.CharField(_('website'), max_length=300, blank=True)
link_outros = models.CharField(_('outros'), max_length=300, blank=True)
link_versionamento = models.CharField(_('versionamento'), max_length=300, blank=True)
etapa = models.CharField(_('etapa'), max_length=3, blank=True)
tags = models.CharField(_('tags'), max_length=300, blank=True)
ativo = models.CharField(_('ativo'), max_length=3, default='VAL')
dataAlteracao = models.DateTimeField(_('data de alteracao'), auto_now=True, auto_now_add=True)
dataCadastro = models.DateTimeField(_('data de cadastro'), auto_now=False, auto_now_add=True)
class Meta:
ordering = ['dataCadastro']
verbose_name = _(u'projeto')
verbose_name_plural = _(u'projetos')
def __unicode__(self):
return self.tema +' - '+ self.liderNome
def save(self, force_insert=False, force_update=False):
self.area = self.area.upper()
self.tema = self.tema.upper()
self.descricao = self.descricao.upper()
self.universidade = self.universidade.upper()
self.universidadeOrientador = self.universidadeOrientador.upper()
self.liderNome = self.liderNome.upper()
self.liderEmail = self.liderEmail.upper()
self.liderIntegrantes = self.liderIntegrantes.upper()
self.etapa = self.etapa.upper()
self.tags = self.tags.upper()
self.ativo = self.ativo.upper()
        super(Project, self).save(force_insert, force_update)
| mit | -2,586,360,229,164,140,500 | 52.245283 | 101 | 0.68061 | false | 3.334515 | false | false | false |
samuelmaudo/yepes | yepes/fields/comma_separated.py | 1 | 2515 | # -*- coding:utf-8 -*-
from __future__ import unicode_literals
import re
from django.db import models
from django.utils import six
from django.utils.translation import ugettext_lazy as _
from yepes import forms
from yepes.fields.char import CharField
from yepes.utils.deconstruct import clean_keywords
class CommaSeparatedField(CharField):
description = _('Comma-separated strings')
def __init__(self, *args, **kwargs):
kwargs.setdefault('max_length', 255)
kwargs['normalize_spaces'] = False
self.separator = kwargs.pop('separator', ', ')
self.separator_re = re.compile(
'\s*{0}\s*'.format(re.escape(self.separator.strip())),
re.UNICODE,
)
kwargs['trim_spaces'] = False
super(CommaSeparatedField, self).__init__(*args, **kwargs)
def clean(self, value, model_instance):
value = self.get_prep_value(value)
self.validate(value, model_instance)
self.run_validators(value)
return self.to_python(value)
def deconstruct(self):
name, path, args, kwargs = super(CommaSeparatedField, self).deconstruct()
path = path.replace('yepes.fields.comma_separated', 'yepes.fields')
clean_keywords(self, kwargs, variables={
'max_length': 255,
'separator': ', ',
}, constants=[
'normalize_spaces',
'trim_spaces',
])
return name, path, args, kwargs
def formfield(self, **kwargs):
kwargs.setdefault('form_class', forms.CommaSeparatedField)
kwargs.setdefault('separator', self.separator)
return super(CommaSeparatedField, self).formfield(**kwargs)
def from_db_value(self, value, expression, connection, context):
if value is None:
return value
else:
return self.separator_re.split(value)
def get_prep_value(self, value):
value = models.Field.get_prep_value(self, value)
if value is None or isinstance(value, six.string_types):
return value
else:
return self.separator.join(value)
def to_python(self, value):
if value is None:
return value
elif not value:
return []
elif isinstance(value, six.string_types):
return self.separator_re.split(value)
else:
return list(value)
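    # With the default ', ' separator, to_python('red, green , blue') yields
    # ['red', 'green', 'blue']; surrounding whitespace is absorbed by separator_re.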
def value_to_string(self, obj):
value = self._get_val_from_obj(obj)
return self.get_prep_value(value)
| bsd-3-clause | -1,359,136,921,683,177,200 | 30.4375 | 81 | 0.614712 | false | 4.063005 | false | false | false |
MAndelkovic/pybinding | pybinding/chebyshev.py | 1 | 22137 | """Computations based on Chebyshev polynomial expansion
The kernel polynomial method (KPM) can be used to approximate various functions by expanding them
in a series of Chebyshev polynomials.
"""
import warnings
import numpy as np
import scipy
from . import _cpp
from . import results
from .model import Model
from .system import System
from .utils.time import timed
from .support.deprecated import LoudDeprecationWarning
__all__ = ['KernelPolynomialMethod', 'kpm', 'kpm_cuda',
'jackson_kernel', 'lorentz_kernel', 'dirichlet_kernel']
class SpatialLDOS:
"""Holds the results of :meth:`KPM.calc_spatial_ldos`
It's a product of a :class:`Series` and a :class:`StructureMap`.
"""
def __init__(self, data, energy, structure):
self.data = data
self.energy = energy
self.structure = structure
def structure_map(self, energy):
"""Return a :class:`StructureMap` of the spatial LDOS at the given energy
Parameters
----------
energy : float
Produce a structure map for LDOS data closest to this energy value.
"""
idx = np.argmin(abs(self.energy - energy))
return self.structure.with_data(self.data[idx])
def ldos(self, position, sublattice=""):
"""Return the LDOS as a function of energy at a specific position
Parameters
----------
position : array_like
sublattice : Optional[str]
"""
idx = self.structure.find_nearest(position, sublattice)
return results.Series(self.energy, self.data[:, idx],
labels=dict(variable="E (eV)", data="LDOS", columns="orbitals"))
class KernelPolynomialMethod:
"""The common interface for various KPM implementations
It should not be created directly but via specific functions
like :func:`kpm` or :func:`kpm_cuda`.
All implementations are based on: https://doi.org/10.1103/RevModPhys.78.275
"""
def __init__(self, impl):
self.impl = impl
@property
def model(self) -> Model:
"""The tight-binding model holding the Hamiltonian"""
return self.impl.model
@model.setter
def model(self, model):
self.impl.model = model
@property
def system(self) -> System:
"""The tight-binding system (shortcut for `KernelPolynomialMethod.model.system`)"""
return System(self.impl.system)
@property
def scaling_factors(self) -> tuple:
"""A tuple of KPM scaling factors `a` and `b`"""
return self.impl.scaling_factors
@property
def kernel(self):
"""The damping kernel"""
return self.impl.kernel
def report(self, shortform=False):
"""Return a report of the last computation
Parameters
----------
shortform : bool, optional
Return a short one line version of the report
"""
return self.impl.report(shortform)
def __call__(self, *args, **kwargs):
warnings.warn("Use .calc_greens() instead", LoudDeprecationWarning)
return self.calc_greens(*args, **kwargs)
def moments(self, num_moments, alpha, beta=None, op=None):
r"""Calculate KPM moments in the form of expectation values
The result is an array of moments where each value is equal to:
.. math::
\mu_n = <\beta|op \cdot T_n(H)|\alpha>
Parameters
----------
num_moments : int
The number of moments to calculate.
alpha : array_like
The starting state vector of the KPM iteration.
beta : Optional[array_like]
If not given, defaults to :math:`\beta = \alpha`.
op : Optional[csr_matrix]
Operator in the form of a sparse matrix. If omitted, an identity matrix
is assumed: :math:`\mu_n = <\beta|T_n(H)|\alpha>`.
Returns
-------
ndarray
"""
from scipy.sparse import csr_matrix
if beta is None:
beta = []
if op is None:
op = csr_matrix([])
else:
op = op.tocsr()
return self.impl.moments(num_moments, alpha, beta, op)
def calc_greens(self, i, j, energy, broadening):
"""Calculate Green's function of a single Hamiltonian element
Parameters
----------
i, j : int
Hamiltonian indices.
energy : ndarray
Energy value array.
broadening : float
Width, in energy, of the smallest detail which can be resolved.
Lower values result in longer calculation time.
Returns
-------
ndarray
Array of the same size as the input `energy`.
"""
return self.impl.calc_greens(i, j, energy, broadening)
def calc_ldos(self, energy, broadening, position, sublattice="", reduce=True):
"""Calculate the local density of states as a function of energy
Parameters
----------
energy : ndarray
Values for which the LDOS is calculated.
broadening : float
Width, in energy, of the smallest detail which can be resolved.
Lower values result in longer calculation time.
position : array_like
Cartesian position of the lattice site for which the LDOS is calculated.
Doesn't need to be exact: the method will find the actual site which is
closest to the given position.
sublattice : str
Only look for sites of a specific sublattice, closest to `position`.
The default value considers any sublattice.
reduce : bool
This option is only relevant for multi-orbital models. If true, the
resulting LDOS will summed over all the orbitals at the target site
and the result will be a 1D array. If false, the individual orbital
results will be preserved and the result will be a 2D array with
`shape == (energy.size, num_orbitals)`.
Returns
-------
:class:`~pybinding.Series`
"""
ldos = self.impl.calc_ldos(energy, broadening, position, sublattice, reduce)
return results.Series(energy, ldos.squeeze(), labels=dict(variable="E (eV)", data="LDOS",
columns="orbitals"))
def calc_spatial_ldos(self, energy, broadening, shape, sublattice=""):
"""Calculate the LDOS as a function of energy and space (in the area of the given shape)
Parameters
----------
energy : ndarray
Values for which the LDOS is calculated.
broadening : float
Width, in energy, of the smallest detail which can be resolved.
Lower values result in longer calculation time.
shape : Shape
Determines the site positions at which to do the calculation.
sublattice : str
Only look for sites of a specific sublattice, within the `shape`.
The default value considers any sublattice.
Returns
-------
:class:`SpatialLDOS`
"""
ldos = self.impl.calc_spatial_ldos(energy, broadening, shape, sublattice)
smap = self.system[shape.contains(*self.system.positions)]
if sublattice:
smap = smap[smap.sub == sublattice]
return SpatialLDOS(ldos, energy, smap)
def calc_dos(self, energy, broadening, num_random=1):
"""Calculate the density of states as a function of energy
Parameters
----------
energy : ndarray
Values for which the DOS is calculated.
broadening : float
Width, in energy, of the smallest detail which can be resolved.
Lower values result in longer calculation time.
num_random : int
The number of random vectors to use for the stochastic calculation of KPM moments.
Larger numbers improve the quality of the result but also increase calculation time
linearly. Fortunately, result quality also improves with system size, so the DOS of
very large systems can be calculated accurately with only a small number of random
vectors.
Returns
-------
:class:`~pybinding.Series`
"""
dos = self.impl.calc_dos(energy, broadening, num_random)
return results.Series(energy, dos, labels=dict(variable="E (eV)", data="DOS"))
def deferred_ldos(self, energy, broadening, position, sublattice=""):
"""Same as :meth:`calc_ldos` but for parallel computation: see the :mod:`.parallel` module
Parameters
----------
energy : ndarray
Values for which the LDOS is calculated.
broadening : float
Width, in energy, of the smallest detail which can be resolved.
Lower values result in longer calculation time.
position : array_like
Cartesian position of the lattice site for which the LDOS is calculated.
Doesn't need to be exact: the method will find the actual site which is
closest to the given position.
sublattice : str
Only look for sites of a specific sublattice, closest to `position`.
The default value considers any sublattice.
Returns
-------
Deferred
"""
return self.impl.deferred_ldos(energy, broadening, position, sublattice)
def calc_conductivity(self, chemical_potential, broadening, temperature,
direction="xx", volume=1.0, num_random=1, num_points=1000):
"""Calculate Kubo-Bastin electrical conductivity as a function of chemical potential
The return value is in units of the conductance quantum (e^2 / hbar) not taking into
account spin or any other degeneracy.
The calculation is based on: https://doi.org/10.1103/PhysRevLett.114.116602.
Parameters
----------
chemical_potential : array_like
Values (in eV) for which the conductivity is calculated.
broadening : float
Width (in eV) of the smallest detail which can be resolved in the chemical potential.
Lower values result in longer calculation time.
temperature : float
Value of temperature for the Fermi-Dirac distribution.
direction : Optional[str]
Direction in which the conductivity is calculated. E.g., "xx", "xy", "zz", etc.
volume : Optional[float]
The volume of the system.
num_random : int
The number of random vectors to use for the stochastic calculation of KPM moments.
Larger numbers improve the quality of the result but also increase calculation time
linearly. Fortunately, result quality also improves with system size, so the DOS of
very large systems can be calculated accurately with only a small number of random
vectors.
num_points : Optional[int]
Number of points for integration.
Returns
-------
:class:`~pybinding.Series`
"""
data = self.impl.calc_conductivity(chemical_potential, broadening, temperature,
direction, num_random, num_points)
if volume != 1.0:
data /= volume
return results.Series(chemical_potential, data,
labels=dict(variable=r"$\mu$ (eV)", data="$\sigma (e^2/h)$"))
class _ComputeProgressReporter:
def __init__(self):
from .utils.progressbar import ProgressBar
self.pbar = ProgressBar(0)
def __call__(self, delta, total):
if total == 1:
return # Skip reporting for short jobs
if delta < 0:
print("Computing KPM moments...")
self.pbar.size = total
self.pbar.start()
elif delta == total:
self.pbar.finish()
else:
self.pbar += delta
def kpm(model, energy_range=None, kernel="default", num_threads="auto", silent=False, **kwargs):
"""The default CPU implementation of the Kernel Polynomial Method
This implementation works on any system and is well optimized.
Parameters
----------
model : Model
Model which will provide the Hamiltonian matrix.
energy_range : Optional[Tuple[float, float]]
KPM needs to know the lowest and highest eigenvalue of the Hamiltonian, before
computing the expansion moments. By default, this is determined automatically
using a quick Lanczos procedure. To override the automatic boundaries pass a
`(min_value, max_value)` tuple here. The values can be overestimated, but note
that performance drops as the energy range becomes wider. On the other hand,
underestimating the range will produce `NaN` values in the results.
kernel : Kernel
The kernel in the *Kernel* Polynomial Method. Used to improve the quality of
the function reconstructed from the Chebyshev series. Possible values are
:func:`jackson_kernel` or :func:`lorentz_kernel`. The Jackson kernel is used
by default.
num_threads : int
The number of CPU threads to use for calculations. This is automatically set
to the number of logical cores available on the current machine.
silent : bool
Don't show any progress messages.
Returns
-------
:class:`~pybinding.chebyshev.KernelPolynomialMethod`
"""
if kernel != "default":
kwargs["kernel"] = kernel
if num_threads != "auto":
kwargs["num_threads"] = num_threads
if "progress_callback" not in kwargs:
kwargs["progress_callback"] = _ComputeProgressReporter()
if silent:
del kwargs["progress_callback"]
return KernelPolynomialMethod(_cpp.kpm(model, energy_range or (0, 0), **kwargs))
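# Minimal usage sketch (illustrative only; assumes `model` is a pre-built pb.Model and the
# energy range, broadening and random-vector count are placeholder values):
#   kpm_calc = kpm(model, energy_range=(-10, 10))
#   dos = kpm_calc.calc_dos(energy=np.linspace(-5, 5, 500), broadening=0.05, num_random=16)
#   ldos = kpm_calc.calc_ldos(energy=np.linspace(-5, 5, 500), broadening=0.05, position=[0, 0])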
def kpm_cuda(model, energy_range=None, kernel="default", **kwargs):
"""Same as :func:`kpm` except that it's executed on the GPU using CUDA (if supported)
See :func:`kpm` for detailed parameter documentation.
This method is only available if the C++ extension module was compiled with CUDA.
Parameters
----------
model : Model
energy_range : Optional[Tuple[float, float]]
kernel : Kernel
Returns
-------
:class:`~pybinding.chebyshev.KernelPolynomialMethod`
"""
try:
if kernel != "default":
kwargs["kernel"] = kernel
# noinspection PyUnresolvedReferences
return KernelPolynomialMethod(_cpp.kpm_cuda(model, energy_range or (0, 0), **kwargs))
except AttributeError:
raise Exception("The module was compiled without CUDA support.\n"
"Use a different KPM implementation or recompile the module with CUDA.")
def jackson_kernel():
"""The Jackson kernel -- a good general-purpose kernel, appropriate for most applications
Imposes Gaussian broadening `sigma = pi / N` where `N` is the number of moments. The
broadening value is user-defined for each function calculation (LDOS, Green's, etc.).
The number of moments is then determined based on the broadening -- it's not directly
set by the user.
"""
return _cpp.jackson_kernel()
def lorentz_kernel(lambda_value=4.0):
"""The Lorentz kernel -- best for Green's function
This kernel is most appropriate for the expansion of the Green’s function because it most
closely mimics the divergences near the true eigenvalues of the Hamiltonian. The Lorentzian
broadening is given by `epsilon = lambda / N` where `N` is the number of moments.
Parameters
----------
lambda_value : float
May be used to fine-tune the smoothness of the convergence. Usual values are
between 3 and 5. Lower values will speed up the calculation at the cost of
accuracy. If in doubt, leave it at the default value of 4.
"""
return _cpp.lorentz_kernel(lambda_value)
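# Illustrative sketch (editor's addition): a non-default kernel is selected by
# passing the factory result to kpm(), e.g. for Green's function calculations:
#
#   greens_solver = kpm(model, kernel=lorentz_kernel(lambda_value=3.5))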
def dirichlet_kernel():
"""The Dirichlet kernel -- returns raw moments, least favorable choice
This kernel doesn't modify the moments at all. The resulting moments represent just
a truncated series which results in lots of oscillation in the reconstructed function.
Therefore, this kernel should almost never be used. It's only here in case the raw
moment values are needed for some other purpose. Note that `required_num_moments()`
returns `N = pi / sigma` for compatibility with the Jackson kernel, but there is
no actual broadening associated with the Dirichlet kernel.
"""
return _cpp.dirichlet_kernel()
class _PythonImpl:
"""Basic Python/SciPy implementation of KPM"""
def __init__(self, model, energy_range, kernel, **_):
self.model = model
self.energy_range = energy_range
self.kernel = kernel
self._stats = {}
@property
def stats(self):
class AttrDict(dict):
"""Allows dict items to be retrieved as attributes: d["item"] == d.item"""
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.__dict__ = self
s = AttrDict(self._stats)
s.update({k: v.elapsed for k, v in s.items() if "_time" in k})
s["eps"] = s["nnz"] / s["moments_time"]
return s
def _scaling_factors(self):
"""Compute the energy bounds of the model and return the appropriate KPM scaling factors"""
def find_bounds():
if self.energy_range[0] != self.energy_range[1]:
return self.energy_range
from scipy.sparse.linalg import eigsh
h = self.model.hamiltonian
self.energy_range = [eigsh(h, which=x, k=1, tol=2e-3, return_eigenvectors=False)[0]
for x in ("SA", "LA")]
return self.energy_range
with timed() as self._stats["bounds_time"]:
emin, emax = find_bounds()
self._stats["energy_min"] = emin
self._stats["energy_max"] = emax
tolerance = 0.01
a = 0.5 * (emax - emin) * (1 + tolerance)
b = 0.5 * (emax + emin)
return a, b
def _rescale_hamiltonian(self, h, a, b):
size = h.shape[0]
with timed() as self._stats["rescale_time"]:
return (h - b * scipy.sparse.eye(size)) * (2 / a)
def _compute_diagonal_moments(self, num_moments, starter, h2):
"""Procedure for computing KPM moments when the two vectors are identical"""
r0 = starter.copy()
r1 = h2.dot(r0) * 0.5
moments = np.zeros(num_moments, dtype=h2.dtype)
moments[0] = np.vdot(r0, r0) * 0.5
moments[1] = np.vdot(r1, r0)
for n in range(1, num_moments // 2):
r0 = h2.dot(r1) - r0
r0, r1 = r1, r0
moments[2 * n] = 2 * (np.vdot(r0, r0) - moments[0])
moments[2 * n + 1] = 2 * np.vdot(r1, r0) - moments[1]
self._stats["num_moments"] = num_moments
self._stats["nnz"] = h2.nnz * num_moments / 2
self._stats["vector_memory"] = r0.nbytes + r1.nbytes
self._stats["matrix_memory"] = (h2.data.nbytes + h2.indices.nbytes + h2.indptr.nbytes
if isinstance(h2, scipy.sparse.csr_matrix) else 0)
return moments
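    # Editor's note: `h2` already carries the factor of two added in
    # _rescale_hamiltonian(), so `h2.dot(r1) - r0` in the loop above is the
    # Chebyshev recurrence r_{n+1} = 2 * H_scaled . r_n - r_{n-1}; each loop
    # iteration therefore yields two moments from the vdot products.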
@staticmethod
def _exval_starter(h2, index):
"""Initial vector for the expectation value procedure"""
r0 = np.zeros(h2.shape[0], dtype=h2.dtype)
r0[index] = 1
return r0
@staticmethod
def _reconstruct_real(moments, energy, a, b):
"""Reconstruct a real function from KPM moments"""
scaled_energy = (energy - b) / a
ns = np.arange(moments.size)
k = 2 / (a * np.pi)
return np.array([k / np.sqrt(1 - w**2) * np.sum(moments.real * np.cos(ns * np.arccos(w)))
for w in scaled_energy])
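    # Editor's note: with w = (energy - b) / a, the expression above evaluates
    #   f(E) = 2 / (pi * a * sqrt(1 - w**2)) * sum_n mu_n * cos(n * arccos(w))
    # i.e. the KPM series reconstruction from the (kernel-damped) moments mu_n.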
def _ldos(self, index, energy, broadening):
"""Calculate the LDOS at the given Hamiltonian index"""
a, b = self._scaling_factors()
num_moments = self.kernel.required_num_moments(broadening / a)
h2 = self._rescale_hamiltonian(self.model.hamiltonian, a, b)
starter = self._exval_starter(h2, index)
with timed() as self._stats["moments_time"]:
moments = self._compute_diagonal_moments(num_moments, starter, h2)
with timed() as self._stats["reconstruct_time"]:
moments *= self.kernel.damping_coefficients(num_moments)
return self._reconstruct_real(moments, energy, a, b)
def calc_ldos(self, energy, broadening, position, sublattice="", reduce=True):
"""Calculate the LDOS at the given position/sublattice"""
with timed() as self._stats["total_time"]:
system_index = self.model.system.find_nearest(position, sublattice)
ham_idx = self.model.system.to_hamiltonian_indices(system_index)
result_data = np.array([self._ldos(i, energy, broadening) for i in ham_idx]).T
if reduce:
return np.sum(result_data, axis=1)
else:
return result_data
def report(self, *_):
from .utils import with_suffix, pretty_duration
stats = self.stats.copy()
stats.update({k: with_suffix(stats[k]) for k in ("num_moments", "eps")})
stats.update({k: pretty_duration(v) for k, v in stats.items() if "_time" in k})
fmt = " ".join([
"{energy_min:.2f}, {energy_max:.2f} [{bounds_time}]",
"[{rescale_time}]",
"{num_moments} @ {eps}eps [{moments_time}]",
"[{reconstruct_time}]",
"| {total_time}"
])
return fmt.format_map(stats)
def _kpm_python(model, energy_range=None, kernel="default", **kwargs):
"""Basic Python/SciPy implementation of KPM"""
if kernel == "default":
kernel = jackson_kernel()
return KernelPolynomialMethod(_PythonImpl(model, energy_range or (0, 0), kernel, **kwargs))
| bsd-2-clause | -5,052,924,310,591,643,000 | 37.697552 | 99 | 0.610165 | false | 4.072677 | false | false | false |
sajuptpm/contrail-controller | src/config/svc-monitor/svc_monitor/port_tuple.py | 1 | 9521 | # vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright (c) 2014 Cloudwatt
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# @author: Rudra Rugge
from vnc_api.vnc_api import *
from config_db import *
from agent import Agent
class PortTupleAgent(Agent):
def __init__(self, svc_mon, vnc_lib, cassandra, config_section, logger):
super(PortTupleAgent, self).__init__(svc_mon, vnc_lib,
cassandra, config_section)
self._logger = logger
def handle_service_type(self):
return 'port-tuple'
def _allocate_iip_for_family(self, iip_family, si, port, vmi):
create_iip = True
update_vmi = False
iip_name = si.uuid + '-' + port['type'] + '-' + iip_family
for iip_id in si.instance_ips:
iip = InstanceIpSM.get(iip_id)
if iip and iip.name == iip_name:
create_iip = False
iip_id = iip.uuid
if iip.uuid not in vmi.instance_ips:
update_vmi = True
break
if create_iip:
iip_obj = InstanceIp(name=iip_name, instance_ip_family=iip_family)
vn_obj = self._vnc_lib.virtual_network_read(id=vmi.virtual_network)
iip_obj.add_virtual_network(vn_obj)
iip_obj.set_service_instance_ip(True)
iip_obj.set_instance_ip_secondary(True)
iip_obj.set_instance_ip_mode(si.ha_mode)
try:
self._vnc_lib.instance_ip_create(iip_obj)
self._vnc_lib.ref_relax_for_delete(iip_id, vn_obj.uuid)
except RefsExistError:
self._vnc_lib.instance_ip_update(iip_obj)
except Exception as e:
return
iip_id = iip_obj.uuid
tag = ServiceInterfaceTag(interface_type=port['type'])
self._vnc_lib.ref_update('service-instance', si.uuid,
'instance-ip', iip_id, None, 'ADD', tag)
InstanceIpSM.locate(iip_id)
si.update()
if create_iip or update_vmi:
self._vnc_lib.ref_update('instance-ip', iip_id,
'virtual-machine-interface', vmi.uuid, None, 'ADD')
self._vnc_lib.ref_relax_for_delete(iip_id, vmi.uuid)
vmi.update()
return
def _allocate_shared_iip(self, si, port, vmi, vmi_obj):
self._allocate_iip_for_family('v4', si, port, vmi)
self._allocate_iip_for_family('v6', si, port, vmi)
return
def set_port_service_health_check(self, port, vmi):
if port['service-health-check'] and not vmi.service_health_check:
self._vnc_lib.ref_update('virtual-machine-interface', vmi.uuid,
'service-health-check', port['service-health-check'], None, 'ADD')
vmi.update()
def set_port_static_routes(self, port, vmi):
if port['interface-route-table'] and not vmi.interface_route_table:
self._vnc_lib.ref_update('virtual-machine-interface', vmi.uuid,
'interface-route-table', port['interface-route-table'], None, 'ADD')
vmi.update()
def set_secondary_ip_tracking_ip(self, vmi):
for iip_id in vmi.instance_ips:
iip = InstanceIpSM.get(iip_id)
if not iip or not iip.instance_ip_secondary:
continue
if iip.secondary_tracking_ip == vmi.aaps[0]['ip']:
continue
iip_obj = self._vnc_lib.instance_ip_read(id=iip.uuid)
iip_obj.set_secondary_ip_tracking_ip(vmi.aaps[0]['ip'])
self._vnc_lib.instance_ip_update(iip_obj)
iip.update(iip_obj.serialize_to_json())
def set_port_allowed_address_pairs(self, port, vmi, vmi_obj):
if not port['allowed-address-pairs']:
return
aaps = port['allowed-address-pairs'].get('allowed_address_pair', None)
if not aaps:
return
update_aap = False
if len(aaps) != len(vmi.aaps or []):
update_aap = True
else:
for idx in range(0, len(vmi.aaps)):
if vmi.aaps[idx]['ip'] != aaps[idx]['ip']:
update_aap = True
break
if update_aap:
vmi_obj.set_virtual_machine_interface_allowed_address_pairs(
port['allowed-address-pairs'])
self._vnc_lib.virtual_machine_interface_update(vmi_obj)
vmi.update()
self.set_secondary_ip_tracking_ip(vmi)
def delete_shared_iip(self, iip):
if not iip.service_instance_ip or not iip.instance_ip_secondary:
return
if iip.service_instance:
return
for vmi_id in iip.virtual_machine_interfaces:
self._vnc_lib.ref_update('instance-ip', iip.uuid,
'virtual-machine-interface', vmi_id, None, 'DELETE')
try:
self._vnc_lib.instance_ip_delete(id=iip.uuid)
InstanceIpSM.delete(iip.uuid)
except NoIdError:
return
def delete_old_vmi_links(self, vmi):
for iip_id in list(vmi.instance_ips):
iip = InstanceIpSM.get(iip_id)
if not iip or not iip.service_instance:
continue
self._vnc_lib.ref_update('instance-ip', iip_id,
'virtual-machine-interface', vmi.uuid, None, 'DELETE')
vmi.instance_ips.remove(iip_id)
irt = InterfaceRouteTableSM.get(vmi.interface_route_table)
if irt and irt.service_instance:
self._vnc_lib.ref_update('virtual-machine-interface', vmi.uuid,
'interface-route-table', irt.uuid, None, 'DELETE')
vmi.interface_route_table = None
health = ServiceHealthCheckSM.get(vmi.service_health_check)
if health and health.service_instance:
self._vnc_lib.ref_update('virtual-machine-interface', vmi.uuid,
'service-health-check', health.uuid, None, 'DELETE')
vmi.service_health_check = None
def set_port_service_chain_ip(self, si, port, vmi, vmi_obj):
self._allocate_shared_iip(si, port, vmi, vmi_obj)
def get_port_config(self, st, si):
st_if_list = st.params.get('interface_type', [])
si_if_list = si.params.get('interface_list', [])
port_config = {}
for index in range(0, len(st_if_list)):
try:
si_if = si_if_list[index]
st_if = st_if_list[index]
except IndexError:
continue
port = {}
port['type'] = st_if.get('service_interface_type')
port['shared-ip'] = st_if.get('shared_ip')
port['static-route-enable'] = st_if.get('static_route_enable')
port['allowed-address-pairs'] = si_if.get('allowed_address_pairs')
port['interface-route-table'] = None
for irt_id in si.interface_route_tables:
irt = InterfaceRouteTableSM.get(irt_id)
if irt and irt.service_interface_tag == port['type']:
port['interface-route-table'] = irt.uuid
break
port['service-health-check'] = None
for health_id in si.service_health_checks:
health = ServiceHealthCheckSM.get(health_id)
if health and health.service_interface_tag == port['type']:
port['service-health-check'] = health.uuid
break
port_config[st_if.get('service_interface_type')] = port
return port_config
def update_port_tuple(self, vmi):
if not vmi.port_tuple:
self.delete_old_vmi_links(vmi)
return
pt = PortTupleSM.get(vmi.port_tuple)
if not pt:
return
si = ServiceInstanceSM.get(pt.parent_key)
if not si:
return
st = ServiceTemplateSM.get(si.service_template)
port_config = self.get_port_config(st, si)
if not port_config:
return
for vmi_id in pt.virtual_machine_interfaces:
vmi = VirtualMachineInterfaceSM.get(vmi_id)
if not vmi:
continue
if not vmi.params:
continue
port = port_config[vmi.params.get('service_interface_type')]
if not port:
continue
vmi_obj = VirtualMachineInterface(fq_name=vmi.fq_name,
name=vmi.name, parent_type='project')
vmi_obj.uuid = vmi.uuid
self.set_port_service_chain_ip(si, port, vmi, vmi_obj)
self.set_port_allowed_address_pairs(port, vmi, vmi_obj)
self.set_port_service_health_check(port, vmi)
self.set_port_static_routes(port, vmi)
def update_port_tuples(self):
for si in ServiceInstanceSM.values():
for pt_id in si.port_tuples:
self.update_port_tuple(pt_id)
for iip in InstanceIpSM.values():
self.delete_shared_iip(iip)
| apache-2.0 | 6,404,146,252,920,576,000 | 38.83682 | 84 | 0.573679 | false | 3.543357 | true | false | false |
muddyfish/PYKE | node/deep_for.py | 1 | 2419 | import copy
from nodes import Node
class DeepFor(Node):
char = ".F"
args = None
results = None
default_arg = 1
def __init__(self, args: Node.NumericLiteral, ast:Node.EvalLiteral):
self.args = args
self.ast = ast
if self.ast.nodes == []:
self.ast.add_node(b"\n")
@Node.test_func([[[[0], 1, 2, 3], [4, 5, 6, 7]]], [[[[2], 4, 6, 8], [10, 12, 14, 16]]], "h}")
@Node.test_func([[1, [[2, 3, [4], 5], 6], 7]], [[2, [[2, 4, [4], 6], 6], 8]], "D 2%+")
def func(self, *args):
"""Deeply run a for loop across a nD tree.
Takes a list or tuple with a varying depth.
Returns a list with the same depth all round with the function applied."""
seq, *args = copy.deepcopy(args)
assert(isinstance(seq, Node.sequence))
self.type = None
self.shared_type = False
rtn = self.recurse(seq, args)
if self.type is None or self.shared_type:
return [rtn]
return [self.recurse(seq, args, run_func=self.cleanup)]
def recurse(self, seq, args, run_func=None):
not_overwritten = run_func is None
if not_overwritten:
run_func = self.run
rtn = []
for i in seq:
if isinstance(i, Node.sequence):
if not_overwritten:
rtn.append(self.recurse(i, args))
else:
rtn.append(self.recurse(i, args, run_func))
else:
rtn.append(run_func(i, args))
if not_overwritten:
self.get_type(rtn[-1])
return rtn
def run(self, obj, args):
rtn = self.ast.run([obj]+args)
if len(rtn) == 1: rtn = rtn[0]
return rtn
def cleanup(self, obj, args):
obj = self.run(obj, args)
if obj:
return obj
else:
return self.type
def get_type(self, obj):
if obj:
rtn_type = {str: "",
int: 0,
list: [],
dict: {},
tuple: (),
set: set(),
bool: False}.get(type(obj), None)
if self.type is None:
self.type = rtn_type
elif self.type == rtn_type:
pass
else:
self.shared_type = True
return obj
| mit | 7,516,931,946,032,238,000 | 30.415584 | 97 | 0.468375 | false | 3.69313 | false | false | false |
5GExchange/nffg | nffg.py | 2 | 140053 | # Copyright 2017 Janos Czentye, Balazs Nemeth, Balazs Sonkoly
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at:
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Abstract class and implementation for basic operations with a single NF-FG, such
as building, parsing, processing NF-FG, helper functions, etc.
"""
import copy
import itertools
import json
import logging
import math
import pprint
import re
from collections import defaultdict, OrderedDict
from copy import deepcopy
import networkx as nx
from networkx.exception import NetworkXError
from .nffg_elements import (Node, NodeNF, NodeInfra, NodeResource, NodeSAP,
Link, EdgeSGLink, EdgeLink, EdgeReq, Port, Flowrule,
NFFGModel, Element)
VERSION = "1.0"
VERBOSE = 5
class AbstractNFFG(object):
"""
Abstract class for managing single NF-FG data structure.
The NF-FG data model is described in YANG. This class provides the
interfaces with the high level data manipulation functions.
"""
__slots__ = ()
# Default domain value
DEFAULT_DOMAIN = NodeInfra.DEFAULT_DOMAIN
"""Default domain value"""
# Infra types
TYPE_INFRA_SDN_SW = NodeInfra.TYPE_SDN_SWITCH
TYPE_INFRA_EE = NodeInfra.TYPE_EE
TYPE_INFRA_STATIC_EE = NodeInfra.TYPE_STATIC_EE
TYPE_INFRA_BISBIS = NodeInfra.TYPE_BISBIS
# Node types
TYPE_INFRA = Node.INFRA
TYPE_NF = Node.NF
TYPE_SAP = Node.SAP
# Link types
TYPE_LINK_STATIC = Link.STATIC
TYPE_LINK_DYNAMIC = Link.DYNAMIC
TYPE_LINK_SG = Link.SG
TYPE_LINK_REQUIREMENT = Link.REQUIREMENT
# Port constants
PORT_ROLE_CONSUMER = Port.ROLE_CONSUMER
PORT_ROLE_PROVIDER = Port.ROLE_PROVIDER
# Mapping mode operations
MODE_ADD = "ADD"
MODE_DEL = "DELETE"
MODE_REMAP = "REMAP"
# Element operation
OP_CREATE = Element.OP_CREATE
OP_REPLACE = Element.OP_REPLACE
OP_MERGE = Element.OP_MERGE
OP_REMOVE = Element.OP_REMOVE
OP_DELETE = Element.OP_DELETE
# Element status
STATUS_INIT = Element.STATUS_INIT
STATUS_PENDING = Element.STATUS_PENDING
STATUS_DEPLOY = Element.STATUS_DEPLOY
STATUS_RUN = Element.STATUS_RUN
STATUS_STOP = Element.STATUS_STOP
STATUS_FAIL = Element.STATUS_FAIL
# Mapping process status
MAP_STATUS_SKIPPED = "SKIPPED" # mark NFFG as skipped for ESCAPE
version = VERSION
##############################################################################
# NFFG specific functions
##############################################################################
def add_nf (self):
"""
Add a single NF node to the NF-FG.
"""
raise NotImplementedError
def add_sap (self):
"""
Add a single SAP node to the NF-FG.
"""
raise NotImplementedError
def add_infra (self):
"""
Add a single infrastructure node to the NF-FG.
"""
raise NotImplementedError
def add_link (self, src, dst):
"""
Add a static or dynamic infrastructure link to the NF-FG.
:param src: source port
:param dst: destination port
"""
raise NotImplementedError
def add_sglink (self, src, dst):
"""
Add an SG link to the NF-FG.
:param src: source port
:param dst: destination port
"""
raise NotImplementedError
def add_req (self, src, dst):
"""
Add a requirement link to the NF-FG.
:param src: source port
:param dst: destination port
"""
raise NotImplementedError
def add_node (self, node):
"""
Add a single node to the NF-FG.
:param node: node object
"""
raise NotImplementedError
def del_node (self, id):
"""
Remove a single node from the NF-FG.
:param id: id of the node
"""
raise NotImplementedError
def add_edge (self, src, dst, link):
"""
Add an edge to the NF-FG.
:param src: source port
:param dst: destination port
:param link: link object
"""
raise NotImplementedError
def del_edge (self, src, dst):
"""
Remove an edge from the NF-FG.
:param src: source port
:param dst: destination port
"""
raise NotImplementedError
##############################################################################
# General functions for create/parse/dump/convert NFFG
##############################################################################
@classmethod
def parse (cls, data):
"""
    General function for parsing data as a new :any:`NFFG` object and return
with its reference.
:param data: raw data
:type data: str
    :return: parsed NFFG object
    :rtype: :class:`NFFG`
"""
raise NotImplementedError
def dump (self):
"""
    General function for dumping :any:`NFFG` according to its format to
plain text.
:return: plain text representation
:rtype: str
"""
raise NotImplementedError
class NFFG(AbstractNFFG):
"""
Internal NFFG representation based on networkx.
"""
__slots__ = ('network', 'id', 'name', 'service_id', 'metadata', 'mode',
'status', 'version')
def __init__ (self, id=None, name=None, service_id=None, mode=None,
metadata=None, status=None, version=VERSION):
"""
Init.
:param id: optional NF-FG identifier (generated by default)
:type id: str or int
:param name: optional NF-FG name (generated by default)
:type name: str
:param service_id: service id this NFFG is originated from
:type service_id: str or int
:param mode: describe how to handle the defined elements (default: ADD)
:type mode: str
:param metadata: optional metadata for NFFG
:type metadata: dict
:param status: optional info for NFFG
:type status: str
:param version: optional version (default: 1.0)
:type version: str
:return: None
"""
super(NFFG, self).__init__()
self.network = nx.MultiDiGraph()
self.id = str(id) if id is not None else Element.generate_unique_id()
self.name = name
self.service_id = service_id
self.metadata = OrderedDict(metadata if metadata else ())
self.mode = mode
self.status = status
self.version = version
##############################################################################
# Element iterators
##############################################################################
@property
def nfs (self):
"""
Iterate over the NF nodes.
:return: iterator of NFs
:rtype: collections.Iterator
"""
return (node for id, node in self.network.nodes_iter(data=True) if
node.type == Node.NF)
@property
def saps (self):
"""
Iterate over the SAP nodes.
:return: iterator of SAPs
:rtype: collections.Iterator
"""
return (node for id, node in self.network.nodes_iter(data=True) if
node.type == Node.SAP)
@property
def infras (self):
"""
Iterate over the Infra nodes.
:return: iterator of Infra node
:rtype: collections.Iterator
"""
return (node for id, node in self.network.nodes_iter(data=True) if
node.type == Node.INFRA)
@property
def links (self):
"""
Iterate over the link edges.
:return: iterator of edges
:rtype: collections.Iterator
"""
return (link for src, dst, link in self.network.edges_iter(data=True) if
link.type == Link.STATIC or link.type == Link.DYNAMIC)
@property
def sg_hops (self):
"""
Iterate over the service graph hops.
:return: iterator of SG edges
:rtype: collections.Iterator
"""
return (link for s, d, link in self.network.edges_iter(data=True) if
link.type == Link.SG)
@property
def reqs (self):
"""
Iterate over the requirement edges.
:return: iterator of requirement edges
:rtype: collections.Iterator
"""
return (link for s, d, link in self.network.edges_iter(data=True) if
link.type == Link.REQUIREMENT)
##############################################################################
# Magic functions mostly for dict specific behaviour
##############################################################################
def __str__ (self):
"""
Return the string representation.
:return: string representation
:rtype: str
"""
return "NFFG(id=%s name=%s, version=%s)" % (
self.id, self.name, self.version)
def __contains__ (self, item):
"""
Return True if item exist in the NFFG, False otherwise.
:param item: node object or id
:type item: :any:`Node` or str
:return: item is in the NFFG
:rtype: bool
"""
if isinstance(item, Node):
item = item.id
return item in self.network
def __iter__ (self, data=False):
"""
Return an iterator over the nodes.
:param data: If True return a two-tuple of node and node data dictionary
:type data: bool
:return: An iterator over nodes.
"""
return self.network.nodes_iter(data=data)
def __len__ (self):
"""
Return the number of nodes.
:return: number of nodes
:rtype: int
"""
return len(self.network)
def __getitem__ (self, item):
"""
Return the object given by the id: item.
:param item: node id
:return: node object
"""
return self.network.node[item]
##############################################################################
# Builder design pattern related functions
##############################################################################
def add_node (self, node):
"""
Add a Node to the structure.
:param node: a Node object
:type node: :any:`Node`
:return: None
"""
self.network.add_node(node.id)
self.network.node[node.id] = node
def del_node (self, node):
"""
Remove the node from the structure.
:param node: node id or node object or a port object of the node
    :type node: str or :any:`Node` or :any:`Port`
:return: the actual node is found and removed or not
:rtype: bool
"""
try:
if isinstance(node, Node):
node = node.id
elif isinstance(node, Port):
node = node.node.id
self.network.remove_node(node)
return True
except NetworkXError:
# There was no node in the graph
return False
def add_edge (self, src, dst, link):
"""
Add an Edge to the structure.
:param src: source node id or Node object or a Port object
    :type src: str or :any:`Node` or :any:`Port`
:param dst: destination node id or Node object or a Port object
    :type dst: str or :any:`Node` or :any:`Port`
:param link: edge data object
:type link: :any:`Link`
:return: None
"""
if isinstance(src, Node):
src = src.id
elif isinstance(src, Port):
src = src.node.id
if isinstance(dst, Node):
dst = dst.id
elif isinstance(dst, Port):
dst = dst.node.id
self.network.add_edge(src, dst, key=link.id)
self.network[src][dst][link.id] = link
def del_edge (self, src, dst, id=None):
"""
Remove the edge(s) between two nodes.
:param src: source node id or Node object or a Port object
    :type src: str or :any:`Node` or :any:`Port`
:param dst: destination node id or Node object or a Port object
    :type dst: str or :any:`Node` or :any:`Port`
:param id: unique id of the edge (otherwise remove all)
:type id: str or int
:return: the actual node is found and removed or not
:rtype: bool
"""
try:
if isinstance(src, Node):
src = src.id
elif isinstance(src, Port):
src = src.node.id
if isinstance(dst, Node):
dst = dst.id
elif isinstance(dst, Port):
dst = dst.node.id
if id is not None:
self.network.remove_edge(src, dst, key=id)
else:
self.network[src][dst].clear()
return True
except NetworkXError:
# There was no node in the graph
return False
def add_nf (self, nf=None, id=None, name=None, func_type=None, dep_type=None,
cpu=None, mem=None, storage=None, cost=None, delay=None,
bandwidth=None):
"""
Add a Network Function to the structure.
:param nf: add this explicit NF object instead of create one
:type nf: :any:`NodeNF`
:param id: optional id
    :type id: str or int
:param name: optional name
:type name: str
:param func_type: functional type (default: "None")
:type func_type: str
:param dep_type: deployment type (default: "None")
:type dep_type: str
:param cpu: CPU resource
:type cpu: float
:param mem: memory resource
:type mem: float
:param storage: storage resource
:type storage: float
    :param cost: NF cost deployment limit
    :type cost: float
:param delay: delay property of the Node
:type delay: float
:param bandwidth: bandwidth property of the Node
:type bandwidth: float
:return: newly created node
:rtype: :any:`NodeNF`
"""
if nf is None:
if any(i is not None for i in (cpu, mem, storage, delay, bandwidth)):
res = NodeResource(cpu=cpu, mem=mem, storage=storage, delay=delay,
bandwidth=bandwidth, cost=cost)
else:
res = None
nf = NodeNF(id=id, name=name, func_type=func_type, dep_type=dep_type,
res=res)
self.add_node(nf)
return nf
def add_sap (self, sap_obj=None, id=None, name=None, binding=None, sap=None,
technology=None, delay=None, bandwidth=None, cost=None,
controller=None, orchestrator=None, l2=None, l4=None,
metadata=None):
"""
Add a Service Access Point to the structure.
:param sap_obj: add this explicit SAP object instead of create one
:type sap_obj: :any:`NodeSAP`
:param id: optional id
:type id: str or int
:param name: optional name
:type name: str
:param binding: interface binding
:type binding: str
:param sap: inter-domain SAP identifier
:type sap: str
:param technology: technology
:type technology: str
:param delay: delay
:type delay: float
:param bandwidth: bandwidth
:type bandwidth: float
:param cost: cost
:type cost: str
:param controller: controller
:type controller: str
:param orchestrator: orchestrator
:type orchestrator: str
:param l2: l2
    :type l2: str
:param l4: l4
:type l4: str
:param metadata: metadata related to Node
:type metadata: dict
:return: newly created node
:rtype: :any:`NodeSAP`
"""
if sap_obj is None:
sap_obj = NodeSAP(id=id, name=name, binding=binding, metadata=metadata)
self.add_node(sap_obj)
return sap_obj
def add_infra (self, infra=None, id=None, name=None, domain=None,
infra_type=None, cpu=None, mem=None, storage=None, cost=None,
zone=None, delay=None, bandwidth=None):
"""
Add an Infrastructure Node to the structure.
:param infra: add this explicit Infra object instead of create one
:type infra: :any:`NodeInfra`
:param id: optional id
:type id: str or int
:param name: optional name
:type name: str
:param domain: domain of the Infrastructure Node (default: None)
:type domain: str
:param infra_type: type of the Infrastructure Node (default: 0)
:type infra_type: int or str
:param cpu: CPU resource
:type cpu: float
:param mem: memory resource
:type mem: float
:param storage: storage resource
:type storage: float
:param cost: cost
:type cost: str
:param zone: zone
:type zone: str
:param delay: delay property of the Node
:type delay: float
:param bandwidth: bandwidth property of the Node
:type bandwidth: float
:return: newly created node
:rtype: :any:`NodeInfra`
"""
if infra is None:
if any(i is not None for i in (cpu, mem, storage, delay, bandwidth)):
res = NodeResource(cpu=cpu, mem=mem, storage=storage, cost=cost,
zone=zone, bandwidth=bandwidth, delay=delay)
else:
res = None
infra = NodeInfra(id=id, name=name, domain=domain, infra_type=infra_type,
res=res)
self.add_node(infra)
return infra
def add_link (self, src_port, dst_port, link=None, id=None, dynamic=False,
backward=False, delay=None, bandwidth=None, cost=None,
qos=None):
"""
Add a Link to the structure.
:param link: add this explicit Link object instead of create one
:type link: :any:`EdgeLink`
:param src_port: source port
:type src_port: :any:`Port`
:param dst_port: destination port
:type dst_port: :any:`Port`
:param id: optional link id
:type id: str or int
:param backward: the link is a backward link compared to an another Link
:type backward: bool
:param delay: delay resource
:type delay: float
:param dynamic: set the link dynamic (default: False)
:type dynamic: bool
:param bandwidth: bandwidth resource
:type bandwidth: float
:param cost: cost
:type cost: str
:param qos: traffic QoS class
:type qos: str
:return: newly created edge
:rtype: :any:`EdgeLink`
"""
if link is None:
type = Link.DYNAMIC if dynamic else Link.STATIC
link = EdgeLink(src=src_port, dst=dst_port, type=type, id=id,
backward=backward, delay=delay, bandwidth=bandwidth,
cost=cost, qos=qos)
else:
link.src, link.dst = src_port, dst_port
self.add_edge(src_port.node, dst_port.node, link)
return link
def add_undirected_link (self, port1, port2, p1p2id=None, p2p1id=None,
dynamic=False, delay=None, bandwidth=None,
cost=None, qos=None):
"""
Add two Links to the structure, in both directions.
:param port1: source port
:type port1: :any:`Port`
:param port2: destination port
:type port2: :any:`Port`
:param p1p2id: optional link id from port1 to port2
:type p1p2id: str or int
:param p2p1id: optional link id from port2 to port1
:type p2p1id: str or int
:param delay: delay resource of both links
:type delay: float
:param dynamic: set the link dynamic (default: False)
:type dynamic: bool
:param bandwidth: bandwidth resource of both links
:type bandwidth: float
:param cost: cost
:type cost: str
:param qos: traffic QoS class
:type qos: str
:return: newly created edge tuple in (p1->p2, p2->p1)
:rtype: :any:(`EdgeLink`, `EdgeLink`)
"""
p1p2Link = self.add_link(port1, port2, id=p1p2id, dynamic=dynamic,
backward=False, delay=delay, bandwidth=bandwidth,
cost=cost, qos=qos)
p2p1Link = self.add_link(port2, port1, id=p2p1id, dynamic=dynamic,
backward=True, delay=delay, bandwidth=bandwidth,
cost=cost, qos=qos)
return p1p2Link, p2p1Link
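  # Illustrative building sketch (editor's addition; the ids and values below are
  # made up and not part of the original examples):
  #
  #   g = NFFG(id="demo")
  #   sw = g.add_infra(id="sw1", infra_type=NFFG.TYPE_INFRA_SDN_SW)
  #   sap = g.add_sap(id="sap1")
  #   g.add_undirected_link(port1=sap.add_port(id=1), port2=sw.add_port(id=1),
  #                         delay=0.1, bandwidth=10)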
def add_sglink (self, src_port, dst_port, hop=None, id=None, flowclass=None,
tag_info=None, delay=None, bandwidth=None, constraints=None,
additional_actions=None):
"""
Add a SG next hop edge to the structure.
:param hop: add this explicit SG Link object instead of create one
:type hop: :any:`EdgeSGLink`
:param src_port: source port
:type src_port: :any:`Port`
:param dst_port: destination port
:type dst_port: :any:`Port`
:param id: optional link id
:type id: str or int
:param flowclass: flowclass of SG next hop link
:type flowclass: str
:param tag_info: tag info
:type tag_info: str
:param delay: delay requested on link
:type delay: float
:param bandwidth: bandwidth requested on link
:type bandwidth: float
:param constraints: optional Constraints object
:type constraints: :class:`Constraints`
:param additional_actions: additional actions
:type additional_actions: str
:return: newly created edge
:rtype: :any:`EdgeSGLink`
"""
if hop is None:
hop = EdgeSGLink(src=src_port, dst=dst_port, id=id, flowclass=flowclass,
tag_info=tag_info, bandwidth=bandwidth, delay=delay,
constraints=constraints,
additional_actions=additional_actions)
self.add_edge(src_port.node, dst_port.node, hop)
return hop
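  # Illustrative sketch (editor's addition, continuing the made-up example above):
  # an SG hop connects two NF/SAP ports of the service graph:
  #
  #   fw = g.add_nf(id="fw", func_type="firewall", cpu=1, mem=1, storage=0)
  #   g.add_sglink(src_port=sap.ports[1], dst_port=fw.add_port(id=1), id="hop1")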
def add_req (self, src_port, dst_port, req=None, id=None, delay=None,
bandwidth=None, sg_path=None):
"""
Add a requirement edge to the structure.
:param req: add this explicit Requirement Link object instead of create one
:type req: :any:`EdgeReq`
:param src_port: source port
:type src_port: :any:`Port`
:param dst_port: destination port
:type dst_port: :any:`Port`
:param id: optional link id
:type id: str or int
:param delay: delay resource
:type delay: float
:param bandwidth: bandwidth resource
:type bandwidth: float
:param sg_path: list of ids of sg_links represents end-to-end requirement
:type sg_path: list or tuple
:return: newly created edge
:rtype: :any:`EdgeReq`
"""
if req is None:
req = EdgeReq(src=src_port, dst=dst_port, id=id, delay=delay,
bandwidth=bandwidth, sg_path=sg_path)
self.add_edge(src_port.node, dst_port.node, req)
return req
def add_metadata (self, name, value):
"""
Add metadata with the given `name`.
:param name: metadata name
:type name: str
:param value: metadata value
:type value: str
:return: the :class:`NFFG` object to allow function chaining
:rtype: :class:`NFFG`
"""
self.metadata[name] = value
return self
def get_metadata (self, name):
"""
Return the value of metadata.
:param name: name of the metadata
:type name: str
:return: metadata value
:rtype: str
"""
return self.metadata.get(name)
def del_metadata (self, name):
"""
Remove the metadata from the :class:`NFFG`. If no metadata is given all the
metadata will be removed.
:param name: name of the metadata
:type name: str
:return: removed metadata or None
:rtype: str or None
"""
if name is None:
self.metadata.clear()
else:
return self.metadata.pop(name, None)
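  # Editor's note: add_metadata() returns the NFFG itself, so metadata calls can
  # be chained, e.g. (illustrative):
  #
  #   g.add_metadata("domain", "X").add_metadata("creator", "editor")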
def dump (self):
"""
Convert the NF-FG structure to a NFFGModel format and return the plain
text representation.
:return: text representation
:rtype: str
"""
# Create the model
nffg = NFFGModel(id=self.id, name=self.name, service_id=self.service_id,
version=self.version, mode=self.mode,
metadata=self.metadata)
# Load Infras
for infra in self.infras:
nffg.node_infras.append(infra)
# Load SAPs
for sap in self.saps:
nffg.node_saps.append(sap)
# Load NFs
for nf in self.nfs:
nffg.node_nfs.append(nf)
# Load Links
for link in self.links:
nffg.edge_links.append(link)
# Load SG next hops
for hop in self.sg_hops:
nffg.edge_sg_nexthops.append(hop)
# Load Requirements
for req in self.reqs:
nffg.edge_reqs.append(req)
# Dump
return nffg.dump()
def dump_to_json (self):
"""
Return the NF-FG structure in JSON compatible format.
:return: NFFG as a valid JSON
:rtype: dict
"""
return json.loads(self.dump())
@classmethod
def parse (cls, raw_data):
"""
Read the given JSON object structure and try to convert to an NF-FG
representation as an :class:`NFFG`
:param raw_data: raw NF-FG description as a string
:type raw_data: str
:return: the parsed NF-FG representation
:rtype: :class:`NFFG`
"""
# Parse text
model = NFFGModel.parse(raw_data)
# Create new NFFG
nffg = NFFG(id=model.id, name=model.name, service_id=model.service_id,
version=model.version, mode=model.mode, metadata=model.metadata)
# Load Infras
for infra in model.node_infras:
nffg.add_node(infra)
# Load SAPs
for sap in model.node_saps:
nffg.add_node(sap)
# Load NFs
for nf in model.node_nfs:
nffg.add_node(nf)
# Load Links
for link in model.edge_links:
if link.src.node.type == NFFG.TYPE_NF or \
link.dst.node.type == NFFG.TYPE_NF:
link.type = str(NFFG.TYPE_LINK_DYNAMIC)
nffg.add_edge(link.src.node, link.dst.node, link)
# Load SG next hops
for hop in model.edge_sg_nexthops:
nffg.add_edge(hop.src.node, hop.dst.node, hop)
# Load Requirements
for req in model.edge_reqs:
nffg.add_edge(req.src.node, req.dst.node, req)
return nffg
@staticmethod
def parse_from_file (path):
"""
Parse NFFG from file given by the path.
:param path: file path
:type path: str
:return: the parsed NF-FG representation
:rtype: :class:`NFFG`
"""
with open(path) as f:
return NFFG.parse(f.read())
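  # Round-trip sketch (editor's addition): dump() emits the JSON text accepted by
  # parse(), so a serialization-based deep copy could look like:
  #
  #   text = original.dump()
  #   restored = NFFG.parse(text)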
##############################################################################
# Helper functions
##############################################################################
def is_empty (self):
"""
Return True if the NFFG contains no Node.
:return: :class:`NFFG` object is empty or not
:rtype: bool
"""
return len(self.network) == 0
def is_infrastructure (self):
"""
Return True if the NFFG is an infrastructure view with Infrastructure nodes.
:return: the NFFG is an infrastructure view
:rtype: bool
"""
return sum([1 for i in self.infras]) != 0
def is_SBB (self):
"""
    Return True if the topology is detected as a trivial SingleBiSBiS view,
    which consists of only one Infra node with type: ``BiSBiS``.
:return: SingleBiSBiS or not
:rtype: bool
"""
itype = [i.infra_type for i in self.infras]
return len(itype) == 1 and itype.pop() == self.TYPE_INFRA_BISBIS
def is_bare (self):
"""
    Return True if the topology does not contain any NF or flowrule that needs
    to be installed or remapped.
:return: is bare topology or not
:rtype: bool
"""
# If there is no VNF
if len([v for v in self.nfs]) == 0:
fr_sum = sum([sum(1 for fr in i.ports.flowrules) for i in self.infras])
# And there is no flowrule in the ports
if fr_sum == 0:
sg_sum = len([sg for sg in self.sg_hops])
        # And there is no SG hop
if sg_sum == 0:
e2e_sum = len([sg for sg in self.reqs])
if e2e_sum == 0:
return True
return False
def is_virtualized (self):
"""
Return True if the topology contains at least one virtualized BiSBiS node.
:return: contains any NF or not
:rtype: bool
"""
return len([i for i in self.infras if
i.infra_type not in (self.TYPE_INFRA_SDN_SW, self.TYPE_INFRA_EE,
self.TYPE_INFRA_STATIC_EE)]) > 0
def get_stat (self):
"""
    Return a summary of the node and SG hop ids stored in the NFFG.
    :return: dict with the ids of Infras, NFs, SAPs and SG hops
    :rtype: dict
"""
return dict(infras=[i.id for i in self.infras],
nfs=[n.id for n in self.nfs],
saps=[s.id for s in self.saps],
sg_hops=[h.id for h in self.sg_hops])
def real_neighbors_iter (self, node):
"""
Return with an iterator over the id of neighbours of the given Node not
counting the SG and E2E requirement links.
:param node: examined :any:`Node` id
:type node: str or int
:return: iterator over the filtered neighbors
:rtype: iterator
"""
return (v for u, v, link in self.network.out_edges_iter(node, data=True)
if link.type in (self.TYPE_LINK_STATIC, self.TYPE_LINK_DYNAMIC))
def real_out_edges_iter (self, node):
"""
Return with an iterator over the out edge data of the given Node not
counting the SG and E2E requirement links.
:param node: examined :any:`Node` id
:type node: str or int
:return: iterator over the filtered neighbors (u,v,d)
:rtype: iterator
"""
return (data for data in self.network.out_edges_iter(node, data=True)
if data[2].type in (self.TYPE_LINK_STATIC, self.TYPE_LINK_DYNAMIC))
def duplicate_static_links (self):
"""
    Extend the NFFG model with backward links for STATIC links to fit the
orchestration algorithm.
STATIC links: infra-infra, infra-sap
:return: NF-FG with the duplicated links for function chaining
:rtype: :class:`NFFG`
"""
# Create backward links
backwards = [EdgeLink(src=link.dst, dst=link.src, id=str(link.id) + "-back",
backward=True, delay=link.delay,
bandwidth=link.bandwidth) for u, v, link in
self.network.edges_iter(data=True) if link.type == Link.STATIC]
# Add backward links to the NetworkX structure in a separate step to
# avoid the link reduplication caused by the iterator based for loop
for link in backwards:
self.add_edge(src=link.src, dst=link.dst, link=link)
return self
def merge_duplicated_links (self):
"""
    Detect duplicated STATIC links which are connected to the same Ports/Nodes
    but with switched source/destination direction, to fit the simplified NFFG
    dumping.
    Only one of the two links is kept, but which one is not defined.
:return: NF-FG with the filtered links for function chaining
:rtype: :class:`NFFG`
"""
# Collect backward links
backwards = [(src, dst, key) for src, dst, key, link in
self.network.edges_iter(keys=True, data=True) if (
link.type == Link.STATIC or link.type == Link.DYNAMIC) and
link.backward is True]
# Delete backwards links
for link in backwards:
self.network.remove_edge(*link)
return self
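  # Editor's note (illustrative): the two helpers above are typically used as a
  # pair around the orchestration step:
  #
  #   topo.duplicate_static_links()    # add backward links for the algorithm
  #   ...                              # mapping / processing
  #   topo.merge_duplicated_links()    # drop backward links before dumping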
def adjacent_sghops (self, nf_id):
"""
Returns a list with the outbound or inbound SGHops from an NF.
:param nf_id: nf node id
    :type nf_id: str or int
    :return: list of adjacent SG hops
    :rtype: list
"""
return [sg for sg in self.sg_hops if sg.src.node.id == nf_id or \
sg.dst.node.id == nf_id]
def infra_neighbors (self, node_id):
"""
Return an iterator for the Infra nodes which are neighbours of the given
node.
:param node_id: infra node
:type node_id: :any:`NodeInfra`
:return: iterator for the list of Infra nodes
"""
return (self.network.node[id] for id in self.network.neighbors_iter(node_id)
if self.network.node[id].type == Node.INFRA)
def running_nfs (self, infra_id):
"""
Return an iterator for the NodeNFs which are mapped to the given Infra node.
:param infra_id: infra node identifier
:type infra_id: :any: `NodeInfra`
:return: iterator for the currently running NodeNFs
"""
return (self.network.node[id] for id in
self.network.neighbors_iter(infra_id) if
self.network.node[id].type == Node.NF)
  def get_domain_of_nf (self, nf_id):
    """
    Return the domain of the Infra node the given NF is mapped to.
    :param nf_id: NF node id
    :type nf_id: str or int
    :return: domain name or None if the NF is not hosted by exactly one Infra
    :rtype: str or None
    """
    bb = [bb for bb in self.infra_neighbors(nf_id)]
    return bb.pop().domain if len(bb) == 1 else None
def strip (self):
"""
Remove all NF and Flowrule from NFFG.
:return: stripped NFFG
:rtype: :class:`NFFG`
"""
nfs = [nf for nf in self.nfs]
for nf in nfs:
self.del_node(node=nf)
for node in self.infras:
for port in node.ports:
port.clear_flowrules()
def clear_links (self, link_type):
"""
Remove every specific Link from the NFFG defined by given ``type``.
:param link_type: link type defined in :class:`NFFG`
:type link_type: str
:return: None
"""
return self.network.remove_edges_from(
[(u, v, link.id) for u, v, link in self.network.edges_iter(data=True) if
link.type == link_type])
def clear_nodes (self, node_type):
"""
Remove every specific Node from the NFFG defined by given ``type``.
:param node_type: node type defined in :class:`NFFG`
:type node_type: str
:return: None
"""
return self.network.remove_nodes_from(
[id for id, node in self.network.nodes_iter(data=True) if
node.type == node_type])
def copy (self):
"""
Return the deep copy of the NFFG object.
:return: deep copy
:rtype: :class:`NFFG`
"""
# copy = NFFG(id=self.id, name=self.name, version=self.version,
# mode=self.mode, metadata=self.metadata.copy(),
# status=self.status)
# copy.network = self.network.copy()
# return copy
from copy import deepcopy
return deepcopy(self)
def calculate_available_link_res (self, sg_hops_to_be_ignored,
mode=AbstractNFFG.MODE_ADD):
"""
Calculates available bandwidth on all the infrastructure links.
Stores them in 'availbandwidth' field of the link objects.
Modifies the NFFG instance.
:param sg_hops_to_be_ignored: container for ID-s which should be ignored
:type sg_hops_to_be_ignored: collections.Iterable
:param mode: Determines whether the flowrules should be considered.
:type mode: str
:return: None
"""
# set availbandwidth to the maximal value
for i, j, k, d in self.network.edges_iter(data=True, keys=True):
if d.type == 'STATIC':
setattr(self.network[i][j][k], 'availbandwidth', d.bandwidth)
# subtract the reserved link and internal (inside Infras) bandwidth
if mode == self.MODE_ADD:
for d in self.infras:
for p in d.ports:
for fr in p.flowrules:
if fr.id not in sg_hops_to_be_ignored and fr.bandwidth is not None:
# Flowrules are cumulatively subtracted from the switching
# capacity of the node.
d.availres['bandwidth'] -= fr.bandwidth
if d.availres['bandwidth'] < 0:
raise RuntimeError("The node bandwidth of %s got below zero "
"during available resource calculation!" %
d.id)
# Get all the mapped paths of all SGHops from the NFFG
sg_map = NFFGToolBox.get_all_sghop_info(self, return_paths=True)
for sg_hop_id, data in sg_map.iteritems():
src, dst, flowclass, bandwidth, delay, constraints, \
additional_actions, path = data
if bandwidth is not None:
for link in path:
link.availbandwidth -= bandwidth
if link.availbandwidth < 0:
raise RuntimeError(
"The link bandwidth of %s got below zero during"
"available resource calculation!" % link.id)
def calculate_available_node_res (self, vnfs_to_be_left_in_place=None,
mode=AbstractNFFG.MODE_ADD):
"""
Calculates available computation and networking resources of the nodes of
NFFG. Creates a NodeResource instance for each NodeInfra to store the
    available resources in the 'availres' attribute added by this function.
    :param vnfs_to_be_left_in_place: NodeNF ids to be ignored during subtraction
:type vnfs_to_be_left_in_place: dict
:param mode: Determines whether the running NFs should be considered.
:return: None
"""
# add available res attribute to all Infras and subtract the running
    # NFs' resources from the given max res
if vnfs_to_be_left_in_place is None:
vnfs_to_be_left_in_place = {}
for n in self.infras:
setattr(self.network.node[n.id], 'availres',
copy.deepcopy(self.network.node[n.id].resources))
if mode == self.MODE_ADD:
for vnf in self.running_nfs(n.id):
# if a VNF needs to be left in place, then it is still mapped by the
# mapping process, but with placement criteria, so its resource
# requirements will be subtracted during the greedy process.
if vnf.id not in vnfs_to_be_left_in_place:
try:
newres = self.network.node[n.id].availres.subtractNodeRes(
self.network.node[vnf.id].resources,
self.network.node[n.id].resources)
except RuntimeError:
raise RuntimeError(
"Infra node`s resources are expected to represent its maximal "
"capabilities."
"The NodeNF(s) running on Infra node %s, use(s)more resource "
"than the maximal." % n.id)
else:
try:
newres = self.network.node[n.id].availres.subtractNodeRes(
vnfs_to_be_left_in_place[vnf.id].resources,
self.network.node[n.id].resources)
except RuntimeError:
raise RuntimeError("VNF %s cannot be kept on host %s with "
"increased resource requirements due to not "
"enough available resources!" % (vnf.id, n.id))
self.network.node[n.id].availres = newres
def del_flowrules_of_SGHop (self, hop_id_to_del):
"""
    Deletes all flowrules which belong to a given SGHop ID.
    The comparison is based on Flowrule.id and SGHop.id, which should be
    identical only for the corresponding flowrules.
    :param hop_id_to_del: id of the SGHop whose flowrules need to be deleted
    :type hop_id_to_del: str or int
:return: None
"""
for n in self.infras:
for p in n.ports:
for fr in p.flowrules:
if fr.id == hop_id_to_del:
p.del_flowrule(id=fr.id)
class NFFGToolBox(object):
"""
Helper functions for NFFG handling operations, etc.
"""
DEFAULT_SBB_ID = "SingleBiSBiS"
##############################################################################
# ------------------ Splitting/Merging-related functions ---------------------
##############################################################################
@staticmethod
def detect_domains (nffg):
"""
Return with the set of detected domains in the given ``nffg``.
:param nffg: observed NFFG
:type nffg: :class:`NFFG`
:return: set of the detected domains
:rtype: set
"""
return {infra.domain for infra in nffg.infras}
@staticmethod
def reset_inter_domain_property (nffg, log=logging.getLogger("SAP-recreate")):
"""
Check infra links and reset inter-domain properties of related ports if
needed.
:param nffg: topology
:type nffg: :class:`NFFG`
:param log: additional logger
:type log: :any:`logging.Logger`
:return: None
"""
log.debug("Check inter-domain port properties...")
for u, v, link in nffg.network.edges_iter(data=True):
# Inter-domain links are given between Infra nodes
if not (nffg[u].type == nffg[v].type == NFFG.TYPE_INFRA):
continue
sport, dport = link.src, link.dst
      # SAP attributes are not None and are the same
if sport.sap == dport.sap is not None:
if not (sport.has_property('type') and dport.has_property('type')):
log.debug("Found unmarked inter-domain link: %s with SAP id: %s"
% (link.id, sport.sap))
link.src.add_property(property='type', value='inter-domain')
link.dst.add_property(property='type', value='inter-domain')
log.debug(
"Mark ports as 'inter-domain': %s, %s" % (link.src, link.dst))
@staticmethod
def recreate_inter_domain_SAPs (nffg, log=logging.getLogger("SAP-recreate")):
"""
Search for possible inter-domain ports examining ports' metadata and
recreate associated SAPs.
:param nffg: observed NFFG
:type nffg: :class:`NFFG`
:param log: additional logger
:type log: :any:`logging.Logger`
:return: modified NFFG
:rtype: :class:`NFFG`
"""
for infra in nffg.infras:
for port in infra.ports:
# Check ports of remained Infra's for SAP ports
if port.get_property("type") == "inter-domain":
# Found inter-domain SAP port
log.debug("Found inter-domain SAP port: %s" % port)
adj_nodes = [v for u, v, l in nffg.real_out_edges_iter(infra.id)
if l.src.id == port.id]
if len(adj_nodes) != 0:
log.debug("Detected port connects to other node: %s!. Skip..." %
adj_nodes)
continue
# Copy optional SAP metadata as special id or name
# Create default SAP object attributes
if port.has_property("sap"):
sap_id = port.get_property("sap")
log.debug("Detected dynamic 'sap' property: %s in port: %s" %
(sap_id, port))
elif port.sap is not None:
sap_id = port.sap
log.debug("Detected static 'sap' value: %s in port: %s" %
(sap_id, port))
else:
log.warning(
"%s is detected as inter-domain port, but 'sap' metadata is not "
"found! Using 'name' metadata as fallback..." % port)
sap_id = port.get_property("name")
if port.has_property('name'):
sap_name = port.get_property("name")
log.debug('Using dynamic name: %s for inter-domain port' % sap_name)
else:
sap_name = port.name
log.debug('Using static name: %s for inter-domain port' % sap_name)
# Add SAP to splitted NFFG
if sap_id in nffg:
log.warning("%s is already in the splitted NFFG. Skip adding..." %
nffg[sap_id])
continue
sap = nffg.add_sap(id=sap_id, name=sap_name)
# Add port to SAP port number(id) is identical with the Infra's port
sap_port = sap.add_port(id=port.id, name=port.name,
properties=port.properties.copy(),
sap=port.sap,
capability=port.capability,
technology=port.technology,
delay=port.delay,
bandwidth=port.bandwidth, cost=port.cost,
controller=port.controller,
orchestrator=port.orchestrator, l2=port.l2,
l4=port.l4,
metadata=port.metadata.copy())
for l3 in port.l3:
sap_port.l3.append(l3.copy())
# Connect SAP to Infra
nffg.add_undirected_link(port1=port, port2=sap_port)
log.debug(
"Add inter-domain SAP: %s with port: %s" % (sap, sap_port))
return nffg
@staticmethod
def trim_orphaned_nodes (nffg, domain=None, log=logging.getLogger("TRIM")):
"""
Remove orphaned nodes from given :class:`NFFG`.
:param nffg: observed NFFG
:type nffg: :class:`NFFG`
:param domain: domain name
:type domain: str
:param log: additional logger
:type log: :any:`logging.Logger`
:return: trimmed NFFG
:rtype: :class:`NFFG`
"""
detected = set()
for u, v, link in nffg.network.edges_iter(data=True):
detected.add(link.src.node.id)
detected.add(link.dst.node.id)
orphaned = {n for n in nffg} - detected
for node in orphaned:
if domain and nffg[node].type == NFFG.TYPE_INFRA and \
nffg[node].domain != domain:
log.warning("Found orphaned node: %s! Remove from sliced part." %
nffg[node])
nffg.del_node(node)
if orphaned:
log.debug("Remained nodes: %s" % [n for n in nffg])
return nffg
@classmethod
def merge_new_domain (cls, base, nffg, log=logging.getLogger("MERGE")):
"""
Merge the given ``nffg`` into the ``base`` NFFG using the given domain name.
:param base: base NFFG object
:type base: :class:`NFFG`
:param nffg: updating information
:type nffg: :class:`NFFG`
:param log: additional logger
:type log: :any:`logging.Logger`
:return: the update base NFFG
:rtype: :class:`NFFG`
"""
# Get new domain name
domain = cls.detect_domains(nffg=nffg)
if len(domain) == 0:
log.error("No domain detected in new %s!" % nffg)
return
if len(domain) > 1:
log.warning("Multiple domain name detected in new %s!" % nffg)
return
# Copy infras
log.debug("Merge domain: %s resource info into %s..." % (domain.pop(),
base.id))
# Check if the infra with given id is already exist in the base NFFG
for infra in nffg.infras:
if infra.id not in base:
c_infra = base.add_infra(infra=deepcopy(infra))
log.debug("Copy infra node: %s" % c_infra)
else:
log.warning("Infra node: %s does already exist in %s. Skip adding..." %
(infra, base))
# Copy NFs
for nf in nffg.nfs:
if nf.id not in base:
c_nf = base.add_nf(nf=deepcopy(nf))
log.debug("Copy NF node: %s" % c_nf)
else:
log.warning("NF node: %s does already exist in %s. Skip adding..." %
(nf, base))
# Copy SAPs
for sap_id in [s.id for s in nffg.saps]:
if sap_id in [s.id for s in base.saps]:
# Found inter-domain SAP
log.debug("Found Inter-domain SAP: %s" % sap_id)
# Search outgoing links from SAP, should be only one
b_links = [l for u, v, l in base.real_out_edges_iter(sap_id)]
if len(b_links) < 1:
log.warning(
"SAP is not connected to any node! Maybe you forgot to call "
"duplicate_static_links?")
return
elif 1 < len(b_links):
log.warning(
"Inter-domain SAP should have one and only one connection to the "
"domain! Using only the first connection.")
continue
# Get inter-domain port in base NFFG
domain_port_dov = b_links[0].dst
sap_port_dov = b_links[0].src
log.debug("Found inter-domain port: %s" % domain_port_dov)
# Search outgoing links from SAP, should be only one
n_links = [l for u, v, l in nffg.real_out_edges_iter(sap_id)]
if len(n_links) < 1:
log.warning(
"SAP is not connected to any node! Maybe you forgot to call "
"duplicate_static_links?")
return
elif 1 < len(n_links):
log.warning(
"Inter-domain SAP should have one and only one connection to the "
"domain! Using only the first connection.")
continue
# Get port and Infra id's in nffg NFFG
p_id = n_links[0].dst.id
n_id = n_links[0].dst.node.id
# Get the inter-domain port from already copied Infra
domain_port_nffg = base.network.node[n_id].ports[p_id]
sap_port_nffg = n_links[0].src
log.debug("Found inter-domain port: %s" % domain_port_nffg)
# Copy inter-domain port properties/values for redundant storing
if len(domain_port_nffg.properties) > 0:
domain_port_dov.properties.update(domain_port_nffg.properties)
log.debug("Copy inter-domain port properties: %s" %
domain_port_dov.properties)
elif len(domain_port_dov.properties) > 0:
domain_port_nffg.properties.update(domain_port_dov.properties)
log.debug("Copy inter-domain port properties: %s" %
domain_port_nffg.properties)
# Ensure to add sap tag to inter domain ports
if 'sap' not in domain_port_dov.properties:
domain_port_dov.add_property("sap", sap_id)
if 'sap' not in domain_port_nffg.properties:
domain_port_nffg.add_property("sap", sap_id)
# Signal Inter-domain port type
domain_port_dov.add_property("type", "inter-domain")
domain_port_nffg.add_property("type", "inter-domain")
# Copy SAP port values into the infra ports
domain_port_dov.name = sap_port_dov.name
domain_port_dov.sap = sap_port_dov.sap
domain_port_dov.capability = sap_port_dov.capability
domain_port_dov.technology = sap_port_dov.technology
domain_port_dov.delay = sap_port_dov.delay
domain_port_dov.bandwidth = sap_port_dov.bandwidth
domain_port_dov.cost = sap_port_dov.cost
domain_port_dov.controller = sap_port_dov.controller
domain_port_dov.orchestrator = sap_port_dov.orchestrator
domain_port_dov.l2 = sap_port_dov.l2
domain_port_dov.l4 = sap_port_dov.l4
for l3 in sap_port_dov.l3:
domain_port_dov.l3.append(l3.copy())
domain_port_dov.metadata.update(sap_port_dov.metadata)
domain_port_nffg.name = sap_port_nffg.name
domain_port_nffg.sap = sap_port_nffg.sap
domain_port_nffg.capability = sap_port_nffg.capability
domain_port_nffg.technology = sap_port_nffg.technology
domain_port_nffg.delay = sap_port_nffg.delay
domain_port_nffg.bandwidth = sap_port_nffg.bandwidth
domain_port_nffg.cost = sap_port_nffg.cost
domain_port_nffg.controller = sap_port_nffg.controller
domain_port_nffg.orchestrator = sap_port_nffg.orchestrator
domain_port_nffg.l2 = sap_port_nffg.l2
domain_port_nffg.l4 = sap_port_nffg.l4
for l3 in sap_port_nffg.l3:
domain_port_nffg.l3.append(l3.copy())
domain_port_nffg.metadata.update(sap_port_nffg.metadata)
# Delete both inter-domain SAP and links connected to them
base.del_node(sap_id)
nffg.del_node(sap_id)
# Add the inter-domain links for both ways
l1, l2 = base.add_undirected_link(
p1p2id="inter-domain-link-%s" % sap_id,
p2p1id="inter-domain-link-%s-back" % sap_id,
port1=domain_port_dov,
port2=domain_port_nffg)
# Set delay/bandwidth values for outgoing link port1 -> port2
l1.delay = domain_port_dov.delay
l1.bandwidth = domain_port_dov.bandwidth
# Set delay/bandwidth values for outgoing link port2 -> port2
l2.delay = domain_port_nffg.delay
l2.bandwidth = domain_port_nffg.bandwidth
else:
# Normal SAP --> copy SAP
c_sap = base.add_sap(sap_obj=deepcopy(nffg.network.node[sap_id]))
log.debug("Copy SAP: %s" % c_sap)
# Copy remaining links which should be valid
for u, v, link in nffg.network.edges_iter(data=True):
src_port = base.network.node[u].ports[link.src.id]
dst_port = base.network.node[v].ports[link.dst.id]
tmp_src, tmp_dst = link.src, link.dst
link.src = link.dst = None
c_link = deepcopy(link)
c_link.src = src_port
c_link.dst = dst_port
link.src, link.dst = tmp_src, tmp_dst
base.add_link(src_port=src_port, dst_port=dst_port, link=c_link)
log.debug("Copy Link: %s" % c_link)
log.debug("Domain merging has been finished!")
# Return the updated NFFG
return base
@staticmethod
def strip_domain (nffg, domain, log=logging.getLogger("STRIP")):
"""
    Trim the given :class:`NFFG` and leave only the nodes belonging to the
    given ``domain``.
    .. warning::
      No inter-domain SAP recreation will be performed after the trim!
:param nffg: mapped NFFG object
:type nffg: :class:`NFFG`
:param domain: extracted domain name
:type domain: str
:param log: additional logger
:type log: :any:`logging.Logger`
:return: stripped NFFG
:rtype: :class:`NFFG`
"""
log.info("Strip domain in %s" % nffg)
nffg = nffg.copy()
    # Collect every node which is not in the domain
deletable = set()
for infra in nffg.infras:
# Domains representations based on infras
if infra.domain == domain:
        # Skip the current domain's Infra
continue
# Mark the infra as deletable
deletable.add(infra.id)
      # Look for orphan NF and SAP nodes which are connected to this deletable infra
for node_id in nffg.real_neighbors_iter(infra.id):
if nffg[node_id].type in (NFFG.TYPE_SAP, NFFG.TYPE_NF):
deletable.add(node_id)
log.debug("Nodes marked for deletion: %s" % deletable)
nffg.network.remove_nodes_from(deletable)
log.debug("Remained nodes: %s" % [n for n in nffg])
return nffg
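  # Illustrative usage of strip_domain() above; the variable names are
  # hypothetical and not part of this module:
  #   sdn_view = NFFGToolBox.strip_domain(nffg=mapped_nffg, domain="SDN")
  # The call works on a copy, so 'mapped_nffg' is left untouched; Infras of
  # other domains are removed together with the NF and SAP nodes attached to
  # them.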
@classmethod
def extract_domain (cls, nffg, domain, log=logging.getLogger("EXTRACT")):
"""
Extract domain view from given :class:``NFFG``.
:param nffg: mapped NFFG object
:type nffg: :class:`NFFG`
:param domain: extracted domain name
:type domain: str
:param log: additional logger
:type log: :any:`logging.Logger`
:return: extracted domain NFFG
:rtype: :class:`NFFG`
"""
return cls.recreate_inter_domain_SAPs(nffg=cls.strip_domain(nffg=nffg,
domain=domain,
log=log))
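  # In short, extract_domain() above is strip_domain() followed by
  # recreate_inter_domain_SAPs(), e.g. (hypothetical call):
  #   domain_view = NFFGToolBox.extract_domain(nffg=dov, domain="OPENSTACK")
  # so links which crossed the domain border are terminated again in
  # recreated inter-domain SAPs instead of being left as dangling ports.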
@classmethod
def split_into_domains (cls, nffg, log=logging.getLogger("SPLIT")):
"""
    Split the given :class:`NFFG` into separate parts based on the original
    domains.
:param nffg: mapped NFFG object
    :type nffg: :class:`NFFG`
:param log: additional logger
:type log: :any:`logging.Logger`
:return: sliced parts as a list of (domain_name, nffg_part) tuples
:rtype: list
"""
splitted_parts = []
log.info("Splitting NFFG: %s according to detected domains" % nffg)
# Define DOMAIN names
domains = cls.detect_domains(nffg=nffg)
log.debug("Detected domains for splitting: %s" % domains)
if len(domains) == 0:
log.warning("No domain has been detected!")
return splitted_parts
NFFGToolBox.reset_inter_domain_property(nffg=nffg, log=log)
# Checks every domain
for domain in domains:
log.info("Create slice for domain: %s" % domain)
      # Collect every node which is not in the domain
deletable = set()
for infra in nffg.infras:
# Domains representations based on infras
if infra.domain == domain:
          # Skip the current domain's Infra
continue
# Mark the infra as deletable
deletable.add(infra.id)
      # Look for orphan NF and SAP nodes which are connected to this deletable
      # infra
for node_id in nffg.real_neighbors_iter(infra.id):
if nffg[node_id].type in (NFFG.TYPE_SAP, NFFG.TYPE_NF):
deletable.add(node_id)
log.debug("Nodes marked for deletion: %s" % deletable)
log.debug("Clone NFFG...")
# Copy the NFFG
nffg_part = nffg.copy()
# Set metadata
nffg_part.name = domain
# Delete needless nodes --> and as a side effect the connected links too
log.debug("Delete marked nodes...")
nffg_part.network.remove_nodes_from(deletable)
if len(nffg_part):
log.debug("Remained nodes: %s" % [n for n in nffg_part])
else:
        log.debug("No node remained after splitting!")
splitted_parts.append((domain, nffg_part))
log.debug(
"Search for inter-domain SAP ports and recreate associated SAPs...")
# Recreate inter-domain SAP
cls.recreate_inter_domain_SAPs(nffg=nffg_part, log=log)
# Check orphaned or not connected nodes and remove them
log.debug("Trim orphaned nodes from splitted part...")
cls.trim_orphaned_nodes(nffg=nffg_part, domain=domain, log=log)
      log.debug("Merge external ports into its original SAP port...")
cls.merge_external_ports(nffg=nffg_part, log=log)
log.info("Splitting has been finished!")
return splitted_parts
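  # Expected result shape of split_into_domains() above (illustrative only,
  # assuming a DoV containing the domains "MININET" and "SDN"):
  #   NFFGToolBox.split_into_domains(nffg=dov)
  #   -> [("MININET", <NFFG ...>), ("SDN", <NFFG ...>)]
  # Each part is a copy of the input restricted to one domain, with
  # inter-domain SAPs recreated and external ports merged back.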
@classmethod
def split_nfs_by_domain (cls, nffg, nfs=None, log=logging.getLogger('SPLIT')):
"""
Split the given NF IDs based on domains defined in given NFFG.
:param nffg: base NFFG
    :type nffg: :class:`NFFG`
:param nfs: collection of NF Ids
:type nfs: list or set
:param log: additional logger
:type log: :any:`logging.Logger`
:return: splitted NF IDs
:rtype: dict
"""
if nfs is None:
      nfs = [nf.id for nf in nffg.nfs]
log.debug("Splitting nfs: %s by domains..." % nfs)
domains = {}
for nf in nfs:
domain = nffg.get_domain_of_nf(nf_id=nf)
if not domain:
log.warning("Missing domain of nf: %s" % nf)
continue
if domain in domains:
domains[domain].append(nf)
else:
domains[domain] = [nf]
return domains
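  # Illustrative result of split_nfs_by_domain() above (hypothetical NF ids
  # and domain names):
  #   NFFGToolBox.split_nfs_by_domain(nffg=dov, nfs=["fwd", "comp"])
  #   -> {"MININET": ["fwd"], "SDN": ["comp"]}
  # NFs whose domain cannot be resolved are skipped with a warning.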
@classmethod
def recreate_missing_match_TAGs (cls, nffg, log=logging.getLogger("TAG")):
"""
Recreate TAGs for flowrules forwarding traffic from a different domain.
    If a hop in the service request is mapped as a collocated link, it might
    break down into multiple links/flowrules in a lower layer where the links
    are placed into different domains. In that case the match/action fields
    are created without tags, because collocated links do not use tags by
    default.
:param nffg: mapped NFFG object
:type nffg: :any:`NFFG`
:param log: additional logger
:type log: :any:`logging.Logger`
:return: None
"""
log.debug("Recreate missing TAG matching fields...")
for infra in nffg.infras:
# Iterate over flowrules of the infra
for flowrule in infra.flowrules():
# Get the source in_port of the flowrule from match field
splitted = flowrule.match.split(';', 1)
in_port = splitted[0].split('=')[1]
try:
# Convert in_port to int if it is possible
in_port = int(in_port)
except ValueError:
pass
# If the port is an inter-domain port
if infra.ports[in_port].get_property('type') == "inter-domain":
log.debug("Found inter-domain port: %s", infra.ports[in_port])
if len(splitted) > 1:
# There is one or more TAG in match
tags = splitted[1].split(';')
found = False
for tag in tags:
try:
vlan = tag.split('|')[-1]
except ValueError:
continue
# Found a TAG with the vlan
if vlan == str(flowrule.id):
found = True
break
if found:
# If found the appropriate TAG -> skip adding
continue
log.debug("TAG with vlan: %s is not found in %s!" % (flowrule.id,
flowrule))
match_vlan = ";TAG=<None>|<None>|%s" % flowrule.id
flowrule.match += match_vlan
log.debug("Manually extended match field: %s" % flowrule.match)
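  # Sketch of the transformation performed by recreate_missing_match_TAGs()
  # above (the port id is illustrative):
  #   before: match = "in_port=port-SAP1"
  #   after:  match = "in_port=port-SAP1;TAG=<None>|<None>|<flowrule id>"
  # Only flowrules whose in_port carries the "inter-domain" type property and
  # which do not already contain a TAG with their own id are extended.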
@classmethod
def rewrite_interdomain_tags (cls, slices, flowrule_stitching=None,
log=logging.getLogger("adaptation.TAG")):
"""
Calculate and rewrite inter-domain tags.
Inter-domain connections via inter-domain SAPs are harmonized
here. The abstract tags in flowrules are rewritten to technology
specific ones based on the information retrieved from inter-domain
SAPs.
:param slices: list of mapped :class:`NFFG` instances
:type slices: list
:param log: additional logger
:type log: :any:`logging.Logger`
:return: list of NFFG structures with updated tags
"""
log.debug("Calculating inter-domain tags...")
for nffg in slices:
log.debug("Processing domain %s" % nffg[0])
# collect SAP ports of infra nodes
sap_ports = []
for sap in nffg[1].saps:
sap_switch_links = [(u, v, link) for u, v, link in
nffg[1].network.edges_iter(data=True) if
sap.id in (u, v) and
link.type == NFFG.TYPE_LINK_STATIC]
# sap_switch_links = [e for e in
# nffg[1].network.edges_iter(data=True) if
# sap.id in e]
# list of e = (u, v, data)
try:
if sap_switch_links[0][0] == sap.id:
sap_ports.append(sap_switch_links[0][2].dst)
else:
sap_ports.append(sap_switch_links[0][2].src)
except IndexError:
log.error(
"Link for SAP: %s is not found." % sap)
continue
log.debug("SAP_PORTS: %s" % sap_ports)
for infra in nffg[1].infras:
# log.debug("Processing infra %s" % infra)
for flowrule in infra.flowrules():
for sap_port in sap_ports:
# process inbound flowrules of SAP ports
if re.search('in_port=', flowrule.match):
in_port = re.sub(r'.*in_port=([^;]*).*', r'\1',
flowrule.match)
if str(in_port) == str(sap_port.id):
# found inbound rule
log.debug("Found inbound flowrule (%s):\n %s"
% (flowrule.id, flowrule))
if sap_port.sap is not None:
log.debug("Found inter-domain SAP port: %s, %s" %
(sap_port, sap_port.sap))
# rewrite TAG in match field
if not re.search(r'TAG', flowrule.match):
match_tag = ";TAG=<None>|<None>|%s" % flowrule.id
flowrule.match += match_tag
log.info("TAG conversion: extend match field in a "
"flowrule of infra %s" % infra.id)
log.info("updated flowrule (%s):\n %s"
% (flowrule.id, flowrule))
else:
log.debug("Found user SAP port: %s" %
sap_port)
# remove TAG from match field
if re.search(r'TAG', flowrule.match):
flowrule.match = re.sub(r'(;TAG=[^;]*)', r'',
flowrule.match)
log.info("TAG conversion: remove TAG match in a "
"flowrule of infra %s" % infra.id)
log.info("updated flowrule (%s):\n %s"
% (flowrule.id, flowrule))
# process outbound flowrules of SAP ports
if re.search('output=', flowrule.action):
output = re.sub(r'.*output=([^;]*).*', r'\1',
flowrule.action)
if str(output) == str(sap_port.id):
# found outbound rule
log.debug("Found outbound rule (%s):\n %s"
% (flowrule.id, flowrule))
if sap_port.sap is not None:
log.debug("Found inter-domain SAP port: %s, %s" %
(sap_port, sap_port.sap))
# rewrite TAG in action field
if not re.search(r'TAG', flowrule.action):
push_tag = ";TAG=<None>|<None>|%s" % flowrule.id
flowrule.action += push_tag
log.info("TAG conversion: extend action field in a "
"flowrule of infra %s" % infra.id)
log.info("updated flowrule (%s):\n %s"
% (flowrule.id, flowrule))
else:
log.debug("Found user SAP port: %s" %
sap_port)
# remove TAG from action field
if re.search(r';TAG', flowrule.action):
flowrule.action = re.sub(r'(;TAG=[^;]*)', r'',
flowrule.action)
log.info("TAG conversion: remove TAG action in a "
"flowrule of infra %s" % infra.id)
# add UNTAG to action field
if not re.search(r'UNTAG', flowrule.action):
flowrule.action += ';UNTAG'
log.info("TAG conversion: add UNTAG action in a "
"flowrule of infra %s" % infra.id)
log.info("updated flowrule (%s):\n %s"
% (flowrule.id, flowrule))
return slices
@staticmethod
def rebind_e2e_req_links (nffg, log=logging.getLogger("REBIND")):
"""
Search for splitted requirement links in the NFFG. If a link connects
inter-domain SAPs rebind the link as an e2e requirement link.
:param nffg: splitted NFFG object
:type nffg: :class:`NFFG`
:param log: additional logger
:type log: :any:`logging.Logger`
:return: rebounded NFFG
:rtype: :class:`NFFG`
"""
log.debug(
"Search for requirement link fragments to rebind as e2e requirement...")
req_cache = []
def __detect_connected_sap (port):
"""
Detect if the given port is connected to a SAP.
:param port: port object
:type port: :any:`Port`
:return: SAP port or None
:rtype: :any:`Port`
"""
connected_port = [l.dst for u, v, l in
nffg.real_out_edges_iter(port.node.id)
if str(l.src.id) == str(port.id)]
# If the number of detected nodes is unexpected continue to the next req
if len(connected_port) < 1:
log.warning("Skip edge rebinding: No connected node is detected for "
"SAP port: %s" % port)
return None
elif len(connected_port) > 1:
log.warning("Skip edge rebinding: Multiple connected nodes are "
"detected for SAP port: %s: %s!" % (port, connected_port))
return None
elif connected_port[0].node.type == NFFG.TYPE_SAP:
return connected_port[0]
else:
return None
for req in nffg.reqs:
if req.src.node.type == NFFG.TYPE_SAP and \
req.dst.node.type == NFFG.TYPE_SAP:
log.debug("Skip rebinding: Detected %s is already an end-to-end link!" %
req)
return nffg
# Detect the node connected to the src port of req link
src_sap_port = __detect_connected_sap(port=req.src)
if src_sap_port:
log.debug("Detected src SAP node: %s" % src_sap_port)
else:
continue
# Detect the node connected to the dst port of req link
dst_sap_port = __detect_connected_sap(port=req.dst)
if dst_sap_port:
log.debug("Detected dst SAP node: %s" % dst_sap_port)
else:
continue
# Create e2e req link and store for rebinding
e2e_req = req.copy()
e2e_req.src = src_sap_port
e2e_req.dst = dst_sap_port
req_cache.append((req.src.node.id, req.dst.node.id, req.id, e2e_req))
# Rebind marked Requirement links
if not req_cache:
log.debug("No requirement link has been rebounded!")
else:
for src, dst, id, e2e in req_cache:
nffg.del_edge(src=src, dst=dst, id=id)
nffg.add_edge(src=e2e.src, dst=e2e.dst, link=e2e)
log.debug("Rebounded requirement link: %s" % e2e)
# Return the rebounded NFFG
return nffg
##############################################################################
# ----------------------- Single BiSBiS view generation ----------------------
##############################################################################
@staticmethod
def generate_SBB_representation (nffg, sbb_id=DEFAULT_SBB_ID,
add_sg_hops=False,
log=logging.getLogger("SBB")):
"""
    Generate the trivial virtual topology, a.k.a. the One BiSBiS or Single
    BiSBiS representation, with calculated resources and transferred NF and
    SAP nodes.
:param nffg: global resource
    :type nffg: :class:`NFFG`
    :param sbb_id: ID of the generated Single BiSBiS Infra node
    :type sbb_id: str
    :param add_sg_hops: recreate SG hop links also (default: False)
:type add_sg_hops: bool
:param log: additional logger
:type log: :any:`logging.Logger`
    :return: Single BiSBiS representation
:rtype: :class:`NFFG`
"""
if nffg is None:
log.error("Missing global resource info! Skip OneBisBis generation!")
return None
# Create Single BiSBiS NFFG
log.debug("Generate trivial SingleBiSBiS NFFG based on %s:" % nffg)
log.debug("START SBB generation...")
sbb = NFFG(id=sbb_id, name="Single-BiSBiS-View")
# Create the single BiSBiS infra
sbb_infra = sbb.add_infra(id="SingleBiSBiS",
name="SingleBiSBiS",
domain=NFFG.DEFAULT_DOMAIN,
infra_type=NFFG.TYPE_INFRA_BISBIS)
# Compute and add resources
# Sum of available CPU
try:
sbb_infra.resources.cpu = sum(
# If iterator is empty, sum got None --> TypeError thrown by sum
(n.resources.cpu for n in nffg.infras if
n.resources.cpu is not None) or None)
except TypeError:
sbb_infra.resources.cpu = None
# Sum of available memory
try:
sbb_infra.resources.mem = sum(
# If iterator is empty, sum got None --> TypeError thrown by sum
(n.resources.mem for n in nffg.infras if
n.resources.mem is not None) or None)
except TypeError:
sbb_infra.resources.mem = None
# Sum of available storage
try:
sbb_infra.resources.storage = sum(
# If iterator is empty, sum got None --> TypeError thrown by sum
(n.resources.storage for n in nffg.infras if
n.resources.storage is not None) or None)
except TypeError:
sbb_infra.resources.storage = None
# Minimal available delay value of infras and links in DoV
try:
# Get the minimum delay in Dov to avoid false negative mapping result
sbb_infra.resources.delay = min(itertools.chain(
        # If the chained iterator is empty --> ValueError thrown by min
(n.resources.delay for n in nffg.infras if
n.resources.delay is not None),
(l.delay for l in nffg.links if l.delay is not None)))
except ValueError:
sbb_infra.resources.delay = None
# Maximum available bandwidth value of infras and links in DoV
try:
max_bw = max(itertools.chain(
(n.resources.bandwidth for n in nffg.infras if
n.resources.bandwidth is not None),
(l.bandwidth for l in nffg.links if l.bandwidth is not None)))
# Number of infras and links in DoV
sum_infra_link = sum(1 for _ in itertools.chain(nffg.infras, nffg.links))
# Overestimate switching capacity to avoid false positive mapping result
sbb_infra.resources.bandwidth = max_bw * sum_infra_link
except ValueError:
sbb_infra.resources.bandwidth = None
    log.debug("Computed SingleBiSBiS resources: %s" % sbb_infra.resources)
# Add supported types
s_types = set()
for infra in nffg.infras:
s_types = s_types.union(infra.supported)
sbb_infra.add_supported_type(s_types)
log.debug("Added supported types: %s" % s_types)
log.debug("Added Infra BiSBiS: %s" % sbb_infra)
log.log(VERBOSE, "SBB:\n%s" % sbb_infra.dump())
# Add existing NFs
for nf in nffg.nfs:
c_nf = sbb.add_nf(nf=nf.copy())
log.debug("Added NF: %s" % c_nf)
log.log(VERBOSE, "NF:\n%s" % nf.dump())
# Discover and add NF connections
for u, v, l in nffg.real_out_edges_iter(nf.id):
if l.type != NFFG.TYPE_LINK_DYNAMIC:
continue
# Explicitly add links for both direction
link1, link2 = sbb.add_undirected_link(port1=c_nf.ports[l.src.id],
port2=sbb_infra.add_port(
id=l.dst.id),
p1p2id=l.id,
p2p1id="%s-back" % l.id,
dynamic=True,
delay=l.delay,
bandwidth=l.bandwidth)
log.debug("Added connection: %s" % link1)
log.debug("Added connection: %s" % link2)
# Use SAP id --> SBB port id cache for delay matrix calculation
delay_matrix_cache = {}
# Add existing SAPs and their connections to the SingleBiSBiS infra
for sap in nffg.saps:
for p in sap.ports:
if str(p.id).startswith("EXTERNAL"):
log.debug("Detected EXTERNAL port: %s in SAP: %s! Skip adding..."
% (p.id, sap.id))
continue
c_sap = sbb.add_sap(sap_obj=sap.copy())
log.debug("Added SAP: %s" % c_sap)
log.log(VERBOSE, "SAP:\n%s" % c_sap.dump())
# Discover and add SAP connections
for u, v, l in nffg.real_out_edges_iter(sap.id):
if len(sap.ports) > 1:
          log.warning("SAP contains multiple ports!")
sbb_infra_port = sbb_infra.add_port(id=str(c_sap.id),
sap=sap.ports.container[0].sap)
# Explicitly add links for both direction
link1, link2 = sbb.add_undirected_link(port1=c_sap.ports[l.src.id],
port2=sbb_infra_port,
p1p2id=l.id,
p2p1id="%s-back" % l.id,
delay=l.delay,
bandwidth=l.bandwidth)
log.debug("Added connection: %s" % link1)
log.debug("Added connection: %s" % link2)
delay_matrix_cache[c_sap.id] = sbb_infra_port.id
# Shortest paths in format of dict in dict keyed with node ids
# e.g. SAP2 --> EE1 --> 4.9
latency_paths = NFFGToolBox.shortestPathsInLatency(G=nffg.network)
log.log(VERBOSE, "Calculated latency paths for delay matrix:\n%s"
% pprint.pformat(latency_paths))
log.log(VERBOSE, "Collected SAP ports for delay matrix:\n%s"
% pprint.pformat(delay_matrix_cache))
dm_elements = itertools.permutations(delay_matrix_cache.keys(), 2)
for src, dst in dm_elements:
if src not in latency_paths:
log.warning("Missing node: %s for latency paths: %s!"
% (src, (src, dst)))
continue
if dst not in latency_paths[src]:
log.warning("Missing node: %s for latency paths: %s!"
% (src, (src, dst)))
else:
sbb_infra.delay_matrix.add_delay(src=src,
dst=dst,
delay=latency_paths[src][dst])
log.debug("Added delay matrix element [%s --> %s]: %s"
% (src, dst, latency_paths[src][dst]))
# Recreate flowrules based on NBalazs functions
sg_hop_info = NFFGToolBox.get_all_sghop_info(nffg=nffg)
log.log(VERBOSE, "Detected SG hop info:\n%s" % pprint.pformat(sg_hop_info))
log.debug("Recreate flowrules...")
for sg_id, value in sg_hop_info.iteritems():
sg_src_node = value[0].node.id
sg_src_port = value[0].id
sg_dst_node = value[1].node.id
sg_dst_port = value[1].id
flowclass = value[2]
fr_bw = value[3]
fr_delay = value[4]
fr_const = deepcopy(value[5])
fr_extra = value[6]
fr_hop = sg_id
sbb_src_port = [l.dst for u, v, l in
sbb.network.out_edges_iter(sg_src_node, data=True) if
l.src.id == sg_src_port and l.src.node.id == sg_src_node]
if len(sbb_src_port) < 1:
log.warning("No opposite Port(node: %s, id: %s) was found for SG hop: "
"%s in new SingleBiSBiS node" % (
sg_src_node, sg_src_port, fr_hop))
continue
if len(sbb_src_port) > 1:
        log.warning("Too many ports (node: %s, id: %s) were found for SG hop: "
                    "%s in new SingleBiSBiS node: %s" % (
                      sg_src_node, sg_src_port, fr_hop, sbb_src_port))
continue
sbb_src_port = sbb_src_port.pop()
sbb_dst_port = [l.dst for u, v, l in
sbb.network.out_edges_iter(sg_dst_node, data=True) if
l.src.id == sg_dst_port and l.src.node.id == sg_dst_node]
if len(sbb_dst_port) < 1:
log.warning("No opposite Port(node: %s, id: %s) was found for SG hop: "
"%s in new SingleBiSBiS node" % (
sg_dst_node, sg_dst_port, fr_hop))
continue
if len(sbb_dst_port) > 1:
        log.warning("Too many ports (node: %s, id: %s) were found for SG hop: "
                    "%s in new SingleBiSBiS node: %s" % (
                      sg_dst_node, sg_dst_port, fr_hop, sbb_dst_port))
continue
sbb_dst_port = sbb_dst_port.pop()
if flowclass:
fr_match = "in_port=%s;flowclass=%s" % (sbb_src_port.id, flowclass)
else:
fr_match = "in_port=%s" % sbb_src_port.id
fr_action = "output=%s" % sbb_dst_port.id
if fr_extra is not None:
fr_action += ";%s" % fr_extra
if value[0].node.type == NFFG.TYPE_SAP and \
value[1].node.type == NFFG.TYPE_NF and \
value[0].sap is not None:
# Update action for flowrule connecting inter-domain SAP to NF
fr_action += ";UNTAG"
fr = sbb_src_port.add_flowrule(id=fr_hop,
match=fr_match,
action=fr_action,
bandwidth=fr_bw,
delay=fr_delay,
constraints=fr_const)
log.debug("Added flowrule: %s" % fr)
if add_sg_hops:
log.debug("Recreate SG hops...")
for sg_id, value in sg_hop_info.iteritems():
sg_src_port = value[0]
sg_dst_port = value[1]
hop_fc = value[2]
hop_bw = value[3]
hop_delay = value[4]
hop_const = deepcopy(value[5])
sg = sbb.add_sglink(id=sg_id,
src_port=sg_src_port,
dst_port=sg_dst_port,
flowclass=hop_fc,
delay=hop_delay,
bandwidth=hop_bw,
constraints=hop_const)
log.debug("Added SG hop: %s" % sg)
else:
log.debug("Skip SG hop recreation for the SingleBiSBiS!")
NFFGToolBox.rewrite_interdomain_tags([(sbb.id, sbb)])
log.debug("END SBB generation...")
# Return with Single BiSBiS infra
return sbb
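  # Resource aggregation rules used by generate_SBB_representation() above
  # (summary of the code, not configurable here):
  #   cpu / mem / storage -> sum over all Infra nodes (None if undefined)
  #   delay               -> min over Infra and link delays (optimistic bound)
  #   bandwidth           -> max node/link bandwidth * number of Infras+links
  #                          (deliberate overestimate of switching capacity)
  # Hypothetical call: sbb = NFFGToolBox.generate_SBB_representation(nffg=dov)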
##############################################################################
# ----------------------- Domain update functions -----------------------
##############################################################################
@classmethod
def clear_domain (cls, base, domain, log=logging.getLogger("CLEAN")):
"""
Clean domain by removing initiated NFs and flowrules related to BiSBiS
nodes of the given domain
:param base: base NFFG object
:type base: :class:`NFFG`
:param domain: domain name
:type domain: str
:param log: additional logger
:type log: :any:`logging.Logger`
    :return: the updated base NFFG
:rtype: :class:`NFFG`
"""
base_domain = cls.detect_domains(nffg=base)
if domain not in base_domain:
log.warning("No node was found in %s with domain: %s for cleanup! "
"Leave NFFG unchanged..." % (base, domain))
return base
for infra in base.infras:
deletable_ports = set()
deletable_nfs = set()
# Skip nodes from other domains
if infra.domain != domain:
continue
# Iterate over out edges from the current BB node
for infra_id, node_id, link in base.real_out_edges_iter(infra.id):
# Mark connected NF for deletion
if base[node_id].type in (NFFG.TYPE_NF,):
deletable_nfs.add(node_id)
# Mark related dynamic port for deletion
deletable_ports.add(link.src)
if deletable_nfs:
log.debug("Initiated NFs marked for deletion: %s on node: %s" %
(deletable_nfs, infra.id))
# Remove NFs
base.network.remove_nodes_from(deletable_nfs)
if deletable_ports:
log.debug("Dynamic ports marked for deletion: %s on node: %s" %
(deletable_ports, infra.id))
# Remove dynamic ports
for p in deletable_ports:
base[infra.id].ports.remove(p)
# Delete flowrules from ports
for port in base[infra.id].ports:
port.clear_flowrules()
return base
@classmethod
def remove_domain (cls, base, domain, log=logging.getLogger("REMOVE")):
"""
Remove elements from the given ``base`` :class:`NFFG` with given ``domain``
name.
:param base: base NFFG object
:type base: :class:`NFFG`
:param domain: domain name
:type domain: str
:param log: additional logger
:type log: :any:`logging.Logger`
    :return: the updated base NFFG
:rtype: :class:`NFFG`
"""
log.debug("Remove nodes and edges which part of the domain: %s from %s..."
% (domain, base))
# Check existing domains
base_domain = cls.detect_domains(nffg=base)
if domain not in base_domain:
log.warning("No node was found in %s with domain: %s for removing! "
"Leave NFFG unchanged..." % (base, domain))
return base
deletable = set()
for infra in base.infras:
# Add deletable infras
if infra.domain != domain:
continue
deletable.add(infra.id)
# Add deletable SAP/NF connected to iterated infra
for node_id in base.real_neighbors_iter(infra.id):
if base[node_id].type in (NFFG.TYPE_SAP, NFFG.TYPE_NF):
deletable.add(node_id)
log.debug("Nodes marked for deletion: %s" % deletable)
base.network.remove_nodes_from(deletable)
if len(base):
log.debug("Remained nodes after deletion: %s" % [n for n in base])
else:
      log.debug("No node remained after deletion!")
log.debug("Search for inter-domain SAP ports and "
"recreate associated SAPs...")
cls.recreate_inter_domain_SAPs(nffg=base, log=log)
# Check orphaned or not connected nodes and remove them
log.debug("Trim orphaned nodes from updated NFFG...")
cls.trim_orphaned_nodes(nffg=base, log=log)
return base
@classmethod
def update_domain (cls, base, updated, log):
"""
Update the given ``updated`` nffg into the ``base`` NFFG.
:param base: base NFFG object
:type base: :class:`NFFG`
:param updated: updated domain information
:type updated: :class:`NFFG`
:param log: additional logger
:type log: :any:`logging.Logger`
    :return: the updated base NFFG
:rtype: :class:`NFFG`
"""
# Get new domain name
domain = cls.detect_domains(nffg=updated)
if len(domain) == 0:
log.error("No domain detected in new %s!" % updated)
return
if len(domain) > 1:
log.warning("Multiple domain name detected in new %s!" % updated)
return
domain = domain.pop()
log.debug("Update elements of domain: %s in %s..." % (domain, base.id))
base_infras = {i.id for i in base.infras if i.domain == domain}
if len(base_infras) == 0:
log.warning("No Node was found in the base %s! Use merging..." % base)
return cls.merge_new_domain(base=base, nffg=updated, log=log)
# If infra nodes were removed or added, best way is to remerge domain
else:
# TODO - implement real update
      log.error("Domain update has not been implemented yet!")
##############################################################################
# ------------------- Status info-based update functions ---------------------
##############################################################################
@classmethod
def update_status_info (cls, nffg, status,
log=logging.getLogger("UPDATE-STATUS")):
"""
Update the mapped elements of given nffg with given status.
:param nffg: base NFFG object
:type nffg: :class:`NFFG`
:param status: new status
:type status: str
:param log: additional logger
:type log: :any:`logging.Logger`
    :return: the updated base NFFG
:rtype: :class:`NFFG`
"""
log.debug("Add %s status for NFs and Flowrules..." % status)
for nf in nffg.nfs:
nf.status = status
for infra in nffg.infras:
for flowrule in infra.flowrules():
flowrule.status = status
return nffg
@classmethod
def update_nffg_by_status (cls, base, updated,
log=logging.getLogger("UPDATE-DOMAIN-STATUS")):
"""
Update status of the elements of the given ``base`` nffg based on the
given ``updated`` nffg.
:param base: base NFFG object
:type base: :class:`NFFG`
:param updated: updated domain information
:type updated: :class:`NFFG`
:param log: additional logger
:type log: :any:`logging.Logger`
    :return: the updated base NFFG
:rtype: :class:`NFFG`
"""
# Update NF status
base_nfs = {nf.id for nf in base.nfs}
updated_nfs = {nf.id for nf in updated.nfs}
log.debug("Update status of NF nodes: %s" % updated_nfs)
for nf in base_nfs:
if nf in updated_nfs:
base[nf].status = updated[nf].status
else:
log.warning("Missing NF: %s from base NFFG: %s" % (nf, base))
# Update Flowrule status
base_infras = {infra.id for infra in base.infras}
updated_infras = {infra.id for infra in updated.infras}
log.debug("Update status of flowrules in Infra nodes: %s" % updated_infras)
for infra_id in base_infras:
# Skip Infras from other domains
if infra_id not in updated_infras:
continue
for port in base[infra_id].ports:
if port.id not in updated[infra_id].ports:
log.warning("Port: %s in Infra: %s is not in the updated NFFG! "
"Skip flowrule status update in this Port..."
% (port.id, infra_id))
continue
for fr in base[infra_id].ports[port.id].flowrules:
changed = False
for ufr in updated[infra_id].ports[port.id].flowrules:
# Theoretically in a port there is only one flowrule with a given
# hop_id --> if the hop_ids are the same it must be the same fr
if fr.id == ufr.id:
fr.status = ufr.status
changed = True
break
if not changed:
log.warning("Flowrule: %s is not in the updated NFFG! "
"Skip flowrule status update..." % fr)
return base
@classmethod
def update_status_by_dov (cls, nffg, dov, init_status=NFFG.STATUS_PENDING,
log=logging.getLogger("UPDATE-DOV-STATUS")):
"""
    Update status of the elements of the given ``nffg`` based on the
    given ``dov``.
:param nffg: base NFFG object
:type nffg: :class:`NFFG`
:param dov: updated domain information
:type dov: :class:`NFFG`
    :param init_status: initial status of new elements
    :type init_status: str
:param log: additional logger
:type log: :any:`logging.Logger`
    :return: the updated base NFFG
:rtype: :class:`NFFG`
"""
# Update NF status
nffg_nfs = {nf.id for nf in nffg.nfs}
dov_nfs = {nf.id for nf in dov.nfs}
log.debug("Update status of existing NF nodes: %s" % nffg_nfs)
for nf in nffg_nfs:
if nf in dov_nfs:
nffg[nf].status = dov[nf].status
else:
nffg[nf].status = init_status
# Update Flowrule status
for infra in nffg.infras:
for flowrule in infra.flowrules():
flowrule.status = init_status
nffg_infras = {infra.id for infra in nffg.infras}
dov_infras = {infra.id for infra in dov.infras}
log.debug("Update status of existing flowrules in Infra nodes: %s" %
nffg_infras)
for infra_id in nffg_infras:
if infra_id not in dov_infras:
continue
for port in nffg[infra_id].ports:
if port.id not in dov[infra_id].ports:
continue
dov_frs = {f.id for f in dov[infra_id].ports[port.id].flowrules}
for fr in nffg[infra_id].ports[port.id].flowrules:
if fr.id not in dov_frs:
fr.status = init_status
for f in dov[infra_id].ports[port.id].flowrules:
if f.id == fr.id:
fr.status = f.status
return nffg
def filter_non_running_NFs (self, nffg, log=logging.getLogger("FILTER")):
"""
Create a new NFFG from the given ``nffg`` and filter out the
    stopped/failed NFs.
:param nffg: base NFFG object
:type nffg: :class:`NFFG`
:param log: additional logger
:type log: :any:`logging.Logger`
:return: None
"""
# TODO implement
pass
@classmethod
def remove_deployed_services (cls, nffg, log=logging.getLogger("CLEAN")):
"""
Remove all the installed NFs, flowrules and dynamic ports from given NFFG.
:param nffg: base NFFG
:type nffg: :class:`NFFG`
:param log: additional logger
:type log: :any:`logging.Logger`
:return: the cleaned nffg
:rtype: :class:`NFFG`
"""
for infra in nffg.infras:
log.debug("Remove deployed elements from Infra: %s" % infra.id)
del_ports = []
del_nfs = []
for src, dst, link in nffg.network.out_edges_iter(data=True):
if link.type == NFFG.TYPE_LINK_DYNAMIC and \
link.dst.node.type == NFFG.TYPE_NF:
del_nfs.append(dst)
del_ports.append(link.src.id)
if del_nfs:
nffg.network.remove_nodes_from(del_nfs)
log.debug("Removed NFs: %s" % del_nfs)
if del_ports:
for id in del_ports:
infra.del_port(id)
log.debug("Removed dynamic ports: %s" % del_ports)
log.debug("Clear flowrules...")
for port in infra.ports:
port.clear_flowrules()
return nffg
##############################################################################
# ----------------------- High level NFFG operations ------------------------
##############################################################################
@classmethod
def _copy_node_type (cls, type_iter, target, log):
"""
    Copies all elements from the iterator which are not in the target, and
    merges their port lists.
:param type_iter: Iterator on objects to be added
:type type_iter: :any: iterator on `Node`
:param target: The target NFFG
:type target: :any: `NFFG`
:return: the updated base NFFG
:rtype: :class:`NFFG`
"""
for obj in type_iter:
if obj.id not in target:
c_obj = target.add_node(deepcopy(obj))
log.debug("Copy NFFG node: %s" % c_obj)
else:
for p in obj.ports:
if p.id not in target.network.node[obj.id].ports:
target.network.node[obj.id].add_port(id=p.id,
properties=p.properties)
# TODO: Flowrules are not copied!
log.debug("Copy port %s to NFFG element %s" % (p, obj))
return target
@classmethod
def _copy_node_type_with_flowrules (cls, type_iter, target, log,
copy_shallow=False):
"""
    Copies all elements from the iterator which are not in the target, and
    merges their port lists.
:param type_iter: Iterator on objects to be added
:type type_iter: :any: iterator on `Node`
:param target: The target NFFG
:type target: :any: `NFFG`
:param log: additional logger
:type log: :any:`logging.Logger`
:return: the updated base NFFG
:rtype: :class:`NFFG`
"""
for obj in type_iter:
if obj.id not in target:
c_obj = target.add_node(obj if copy_shallow else deepcopy(obj))
log.debug("Copy NFFG node: %s" % c_obj)
else:
for p in obj.ports:
if p.id not in target.network.node[obj.id].ports:
new_port = target.network.node[obj.id].add_port(id=p.id,
properties=p.properties)
log.debug("Copy port %s to NFFG element %s" % (p, obj))
if hasattr(p, 'flowrules'):
log.debug("Merging flowrules of port %s of node %s" %
(p.id, obj.id))
for fr in p.flowrules:
if fr.id not in (f.id for f in new_port.flowrules):
new_port.flowrules.append(fr if copy_shallow else
copy.deepcopy(fr))
else:
old_port = target.network.node[obj.id].ports[p.id]
for fr in p.flowrules:
if fr.id not in (f.id for f in old_port.flowrules):
old_port.flowrules.append(fr if copy_shallow else
copy.deepcopy(fr))
return target
@classmethod
def merge_nffgs (cls, target, new, log=logging.getLogger("UNION"),
copy_shallow=False):
"""
Merges new `NFFG` to target `NFFG` keeping all parameters and copying
port object from new. Comparison is done based on object id, resources and
requirements are kept unchanged in target.
    :param copy_shallow: if set to True, store only references to the copied
      objects instead of deep copies
    :type copy_shallow: bool
:param target: target NFFG object
:type target: :class:`NFFG`
:param new: NFFG object to merge from
:type new: :class:`NFFG`
:param log: additional logger
:type log: :any:`logging.Logger`
:return: the updated base NFFG
:rtype: :class:`NFFG`
"""
# Copy Infras
target = cls._copy_node_type_with_flowrules(new.infras, target, log,
copy_shallow)
# Copy NFs
target = cls._copy_node_type(new.nfs, target, log)
# Copy SAPs
target = cls._copy_node_type(new.saps, target, log)
# Copy remaining links which should be valid
for u, v, link in new.network.edges_iter(data=True):
if not target.network.has_edge(u, v, key=link.id):
src_port = target.network.node[u].ports[link.src.id]
dst_port = target.network.node[v].ports[link.dst.id]
tmp_src, tmp_dst = link.src, link.dst
link.src = link.dst = None
c_link = link if copy_shallow else deepcopy(link)
c_link.src = src_port
c_link.dst = dst_port
link.src, link.dst = tmp_src, tmp_dst
target.add_link(src_port=src_port, dst_port=dst_port, link=c_link)
log.debug("Copy Link: %s" % c_link)
return target
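  # Illustrative usage of merge_nffgs() above (hypothetical objects):
  #   dov = NFFGToolBox.merge_nffgs(target=dov, new=domain_update)
  # With copy_shallow=True the target ends up referencing the same node/port/
  # flowrule objects as 'new', which is cheaper but means later modifications
  # of 'new' are visible in 'target' as well.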
@classmethod
def subtract_nffg (cls, minuend, subtrahend, consider_vnf_status=False,
ignore_infras=False):
"""
    Deletes every node (of any type) from minuend whose degree in subtrahend
    is at least as high as its degree in minuend. Also removes every edge (of
    any type) from minuend which is present in subtrahend. Changes minuend,
    but doesn't change subtrahend.
NOTE: a node cannot be decreased to degree 0, because then it will be
removed.
:param minuend: minuend NFFG object
:type minuend: :class:`NFFG`
:param subtrahend: NFFG object to be subtracted
:type subtrahend: :class:`NFFG`
:param consider_vnf_status: consider VNF status
:type consider_vnf_status: bool
:param ignore_infras: ignore infra nodes
:type ignore_infras: bool
:return: NFFG which is minuend \ subtrahend
:rtype: :class:`NFFG`
"""
if ignore_infras:
minuend_degrees = {}
for nf in minuend.nfs:
minuend_degrees[nf.id] = len(minuend.adjacent_sghops(nf.id))
subtrahend_degrees = [(nf.id, len(subtrahend.adjacent_sghops(nf.id))) \
for nf in subtrahend.nfs]
else:
minuend_degrees = minuend.network.degree()
subtrahend_degrees = subtrahend.network.degree().iteritems()
for n, d in subtrahend_degrees:
if n in minuend_degrees:
if d >= minuend_degrees[n]:
# If their status shall be considered AND the statuses are equal then
# they are considered equal and it shouldn't be in the minuend.
if not consider_vnf_status or (consider_vnf_status and
subtrahend.network.node[
n].status ==
minuend.network.node[n].status):
for edge_func in (minuend.network.in_edges_iter,
minuend.network.out_edges_iter):
for i, j, data in edge_func([n], data=True):
if data.type == 'SG':
minuend.del_flowrules_of_SGHop(data.id)
minuend.del_node(minuend.network.node[n])
for i, j, k, d in subtrahend.network.edges_iter(keys=True, data=True):
if minuend.network.has_edge(i, j, key=k):
minuend.del_edge(i, j, k)
if d.type == 'SG':
minuend.del_flowrules_of_SGHop(d.id)
return minuend
@classmethod
def generate_difference_of_nffgs (cls, old, new, ignore_infras=False):
"""
Creates two NFFG objects which can be used in NFFG.MODE_ADD and
NFFG.MODE_DEL
operation modes of the mapping algorithm. Doesn't modify input objects.
If infra nodes shall be ignored, node degree comparison is only based on
SGHops, but the output structure still contains the infras which were in
the input.
:param old: old NFFG object
:type old: :class:`NFFG`
:param new: NFFG object of the new config
:type new: :class:`NFFG`
:param ignore_infras: ignore infra nodes
:type ignore_infras: bool
:return: a tuple of NFFG-s for addition and deletion resp. on old config.
:rtype: tuple
"""
add_nffg = copy.deepcopy(new)
add_nffg.mode = NFFG.MODE_ADD
del_nffg = copy.deepcopy(old)
del_nffg.mode = NFFG.MODE_DEL
add_nffg = NFFGToolBox.subtract_nffg(add_nffg, old,
consider_vnf_status=True,
ignore_infras=ignore_infras)
del_nffg = NFFGToolBox.subtract_nffg(del_nffg, new,
ignore_infras=ignore_infras)
# WARNING: we always remove the EdgeReqs from the delete NFFG, this doesn't
# have a defined meaning so far.
for req in [r for r in del_nffg.reqs]:
del_nffg.del_edge(req.src, req.dst, req.id)
# NOTE: It should be possible to delete an NF, which is not connected
# anywhere. With setting and using the operation field of NFs, NFs with
# no connected SGhops are possible.
# for n, d in [t for t in del_nffg.network.nodes(data=True)]:
# if del_nffg.network.out_degree(n) + del_nffg.network.in_degree(n) == 0:
# del_nffg.del_node(d)
# NOTE: set operation delete to filter removing NFs which wouldn't have
# left any more connected SGHops.
for del_nf in del_nffg.nfs:
if del_nf.id in old.network.nodes_iter() and \
del_nf.id not in new.network.nodes_iter():
del_nf.operation = NFFG.OP_DELETE
# The output ADD NFFG shall still include the Infras even if they were
# ignored during the difference calculation.
# Copy data from new NFFG to old NFFG
add_nffg.id = del_nffg.id = new.id
add_nffg.name = del_nffg.name = new.name
add_nffg.metadata = new.metadata.copy()
del_nffg.metadata = new.metadata.copy()
return add_nffg, del_nffg
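  # Conceptually, generate_difference_of_nffgs() above computes (using set
  # notation on element ids, purely illustrative):
  #   add_nffg = new \ old   (mode = MODE_ADD)
  #   del_nffg = old \ new   (mode = MODE_DEL)
  # e.g. an NF present only in 'new' appears in add_nffg, while an NF present
  # only in 'old' appears in del_nffg with operation set to OP_DELETE.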
##############################################################################
# --------------------- Mapping-related NFFG operations ----------------------
##############################################################################
@staticmethod
def _find_infra_link (nffg, port, outbound=True, accept_dyn=False):
"""
Returns the object of a static link which is connected to 'port'.
    A RuntimeError is raised if no connected link of the accepted type is found.
:param nffg: NFFG object which contains port.
:type nffg: :class:`NFFG`
:param port: The port which should be the source or destination.
:type port: :any:`Port`
:param outbound: Determines whether outbound or inbound link should be found
:type outbound: bool
:param accept_dyn: accepts DYNAMIC links too
    :type accept_dyn: bool
:return: found static link or None
:rtype: :any:`Link`
"""
link = None
if outbound:
edges_func = nffg.network.out_edges_iter
else:
edges_func = nffg.network.in_edges_iter
for i, j, d in edges_func([port.node.id], data=True):
if d.type == 'STATIC' or (accept_dyn and d.type == 'DYNAMIC'):
if outbound and port.id == d.src.id:
if link is not None:
raise RuntimeError("InfraPort %s has more than one outbound "
"links!" % port.id)
link = d
if not outbound and port.id == d.dst.id:
if link is not None:
raise RuntimeError("InfraPort %s has more than one inbound "
"links!" % port.id)
link = d
if link is None:
raise RuntimeError(" ".join(("Dynamic" if accept_dyn else "Static",
"outbound" if outbound else "inbound",
"link couldnt be found connected to port",
str(port))))
return link
@staticmethod
def try_to_convert (id):
"""
Tries to convert a string type ID to integer (base 10).
:param id: ID to be converted
:type id: str
:return: integer ID if it can be converted, string otherwise
    :rtype: int or str
"""
converted = id
try:
converted = int(id)
except ValueError:
pass
return converted
@staticmethod
def _extract_flowclass (splitted_matches):
"""
    Interprets the match field of a flowrule: everything is considered
    flowclass except the "TAG=" and "in_port=" fields. Returns the string to
    be put into the flowclass field. Hopefully the order of the match
    segments is kept or irrelevant.
:param splitted_matches: elements of the match field
:type splitted_matches: list
:return: flowclass value
:rtype: str
"""
flowclass = ""
for match in splitted_matches:
field, mparam = match.split("=", 1)
if field == "flowclass":
flowclass += mparam
elif field != "TAG" and field != "in_port":
flowclass += "".join((field, "=", mparam))
if flowclass == "":
return None
else:
return flowclass
@staticmethod
def _extract_additional_actions (splitted_actions):
"""
    Interprets the action field of a flowrule: every action is considered
    additional, except the ones used for traffic steering such as the "UNTAG"
    and "output" actions. Returns the string to be put into the
    additional_actions field.
:param splitted_actions: elements of the action fields
:type splitted_actions: list
:return: additional actions
:rtype: str
"""
additional_actions = ""
for action in splitted_actions:
action_2_list = action.split("=", 1)
field = action_2_list[0]
mparam = ""
if len(action_2_list) == 2:
mparam = action_2_list[1]
if field != "UNTAG" and field != "output" and field != "TAG":
        # if there is at least one additional action, they should be
# separated by ";"-s.
if additional_actions != "":
additional_actions += ";"
additional_actions += field if mparam == "" else \
"".join((field, "=", mparam))
if additional_actions == "":
return None
else:
return additional_actions
@staticmethod
def _get_flowrule_and_its_starting_port (infra, fr_id):
"""
Finds the Flowrule which belongs to the path of SGHop with ID 'fr_id'.
:param infra: Infra object where we should look for the Flowrule
:type infra: :any:`NodeInfra`
:param fr_id: Flowrule/SGHop ID to look for
:type fr_id: int
:return: Flowrule and its containing InfraPort
:rtype: tuple
"""
for p in infra.ports:
for fr in p.flowrules:
if fr.id == fr_id:
return fr, p
else:
return None, None
@staticmethod
def get_inport_of_flowrule (infra, fr_id):
"""
    Finds the InfraPort which contains the Flowrule belonging to the path of
    the SGHop with ID 'fr_id'.
:param infra: Infra object where we should look for the Flowrule
:type infra: :any:`NodeInfra`
:param fr_id: Flowrule/SGHop ID to look for
:type fr_id: int
    :return: the InfraPort containing the Flowrule
    :rtype: :any:`InfraPort`
"""
for p in infra.ports:
for fr in p.flowrules:
if fr.id == fr_id:
return p
else:
raise RuntimeError("Couldn't find Flowrule for SGHop %s in Infra %s!"
% (fr_id, infra.id))
@staticmethod
def get_output_port_of_flowrule (infra, fr):
"""
Find the port object where this Flowrule sends the traffic out.
:param infra: Infra object where we should look for the InfraPort.
:type infra: :any:`NodeInfra`
:param fr: flowrule object
:type fr: :class:`Flowrule`
:return: The output infra port.
:rtype: :any:`InfraPort`
"""
for action in fr.action.split(";"):
comm, arg = action.split("=", 1)
if comm == 'output':
if "://" in arg:
# target-less flow rule -> skip
return
arg = NFFGToolBox.try_to_convert(arg)
return infra.ports[arg]
else:
raise RuntimeError("Couldn't find output InfraPort object for Flowrule %s"
" in Infra%s!" % (fr.id, infra.id))
@staticmethod
def _check_flow_consistency (sg_map, fr_sg):
"""
Checks whether there is an inconsistency with Flowrule or SGHop 'fr_sg'
and the other flowrules which are part of the SGHop's sequence OR SGHop
which is in sg_map. Throws runtime exception if error found.
Uses only the common fields of Flowrules and SGHops.
'flowclass' needs to be extracted if 'fr_sg' is not an SGHop.
:param sg_map: SGHop sequence
:type sg_map: dict
:param fr_sg: checked flowentry or SGhop
:type fr_sg: :class:`Flowrule` or :class:`EdgeSGLink`
:return: None
"""
if isinstance(fr_sg, Flowrule):
flowclass = NFFGToolBox._extract_flowclass(fr_sg.match.split(";"))
else:
flowclass = fr_sg.flowclass
consistent = True
if sg_map[fr_sg.id][2] != flowclass:
consistent = False
if (sg_map[fr_sg.id][3] is None or sg_map[fr_sg.id][3] == float("inf")) != \
(fr_sg.bandwidth is None or fr_sg.bandwidth == float("inf")):
# If not both of them are None
consistent = False
elif (sg_map[fr_sg.id][3] is not None) and (fr_sg.bandwidth is not None):
if consistent and math.fabs(sg_map[fr_sg.id][3] - fr_sg.bandwidth) > 1e-8:
consistent = False
if (sg_map[fr_sg.id][4] is None or sg_map[fr_sg.id][4] == 0.000000000) != \
(fr_sg.delay is None or fr_sg.delay == 0.0000000000):
# If not both of them are None
consistent = False
elif (sg_map[fr_sg.id][4] is not None) and (fr_sg.delay is not None):
if math.fabs(sg_map[fr_sg.id][4] - fr_sg.delay) > 1e-8:
consistent = False
if not consistent:
raise RuntimeError("Not all data of a Flowrule equal to the other "
"Flowrules of the sequence for the SGHop %s! Or the"
" SGHop to be added differs in data from the existing"
" SGHop!" % fr_sg.id)
@staticmethod
def get_all_sghop_info (nffg, return_paths=False,
log=logging.getLogger("SG-RECREATE")):
"""
    Returns a dictionary keyed by sghopid; the data is the list [PortObj src,
    PortObj dst, SGHop.flowclass, SGHop.bandwidth, SGHop.delay,
    SGHop.constraints, SGHop.additional_action].
Source and destination VNF-s can be retrieved from port references
(port.node.id). The function 'recreate_all_sghops' should receive this exact
NFFG object and the output of this function.
It is based exclusively on flowrules, flowrule ID-s are equal to the
corresponding SGHop's ID.
If return_paths is set, the last element in the dict values is always an
unordered list of the STATIC link references, which are used by the flowrule
sequence. Doesn't change the input NFFG, only returns the SGHop values,
SGHops are not added.
:param nffg: the processed NFFG object
:type nffg: :class:`NFFG`
:param return_paths: flag for returning paths
:type return_paths: bool
:return: extracted values
:rtype: dict
"""
class MissingFlowruleEndingPort(Exception):
pass
sg_map = {}
for i in nffg.infras:
for p in i.ports:
for fr in p.flowrules:
try:
# if fr.external:
# continue
if fr.id not in sg_map:
# The path is unordered!!
path_of_shop = []
flowclass = NFFGToolBox._extract_flowclass(fr.match.split(";"))
sg_map[fr.id] = [None, None, flowclass, fr.bandwidth, fr.delay,
fr.constraints, None]
# We have to find the BEGINNING of this flowrule sequence.
inbound_link = NFFGToolBox._find_infra_link(nffg, p,
outbound=False,
accept_dyn=True)
while inbound_link.type != 'DYNAMIC':
path_of_shop.append(inbound_link)
if inbound_link.src.node.type == 'SAP':
break
# The link is STATIC, and its src is not SAP so it is an Infra.
prev_fr, prev_p = \
NFFGToolBox._get_flowrule_and_its_starting_port(
inbound_link.src.node, fr.id)
                if prev_fr is None:
raise MissingFlowruleEndingPort()
NFFGToolBox._check_flow_consistency(sg_map, prev_fr)
inbound_link = NFFGToolBox._find_infra_link(nffg, prev_p,
outbound=False,
accept_dyn=True)
# 'inbound_link' is DYNAMIC here or it is STATIC and starts from
# a SAP,
# so the sequence starts here
sg_map[fr.id][0] = inbound_link.src
# We have to find the ENDING of this flowrule sequence.
output_port = NFFGToolBox.get_output_port_of_flowrule(i, fr)
if output_port is None:
continue
outbound_link = NFFGToolBox._find_infra_link(nffg, output_port,
outbound=True,
accept_dyn=True)
while outbound_link.type != 'DYNAMIC':
path_of_shop.append(outbound_link)
if outbound_link.dst.node.type == 'SAP':
break
# The link is STATIC and its dst is not a SAP so it is an Infra.
next_fr, _ = NFFGToolBox._get_flowrule_and_its_starting_port(
outbound_link.dst.node, fr.id)
                if next_fr is None:
raise MissingFlowruleEndingPort()
# '_' is 'outbound_link.dst'
next_output_port = NFFGToolBox.get_output_port_of_flowrule(
outbound_link.dst.node, next_fr)
NFFGToolBox._check_flow_consistency(sg_map, next_fr)
outbound_link = NFFGToolBox._find_infra_link(nffg,
next_output_port,
outbound=True,
accept_dyn=True)
# the 'outbound_link' is DYNAMIC here or finishes in a SAP, so the
# flowrule sequence finished here.
sg_map[fr.id][1] = outbound_link.dst
# the additional action is only present in the last flowrule of
# the flowrule sequence.
for last_fr in nffg.network.node[outbound_link.src.node.id]. \
flowrules():
# we need to retrieve this last flowrule
if last_fr.id == fr.id:
# extract the additional action if there is any
additional_action = NFFGToolBox._extract_additional_actions(
last_fr.action.split(";"))
sg_map[fr.id][6] = additional_action
break
if return_paths:
sg_map[fr.id].append(path_of_shop)
except MissingFlowruleEndingPort:
del sg_map[fr.id]
log.warn("Couldn't find Flowrule for SGHop %s in Infra %s!" %
(fr.id, i.id))
return sg_map
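  # Shape of one entry returned by get_all_sghop_info() above (the values are
  # illustrative, the dict is keyed by SGHop id):
  #   sg_map["sg1"] = [<src Port>, <dst Port>, "dl_type=0x0800", 10, 5.0,
  #                    <constraints>, "mod_dl_dst=..."]
  # i.e. [src, dst, flowclass, bandwidth, delay, constraints,
  # additional_action]; with return_paths=True an unordered list of the
  # traversed STATIC links is appended as an extra element.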
@staticmethod
def recreate_all_sghops (nffg):
"""
Extracts the SGHop information from the input NFFG, and creates the SGHop
objects in the NFFG.
:param nffg: the NFFG to look for SGHop info and to modify
:type nffg: :class:`NFFG`
:return: the modified NFFG
:rtype: :class:`NFFG`
"""
sg_map = NFFGToolBox.get_all_sghop_info(nffg)
for sg_hop_id, data in sg_map.iteritems():
src, dst, flowclass, bandwidth, delay, constraints, \
additional_actions = data
if not (src and dst):
continue
if not nffg.network.has_edge(src.node.id, dst.node.id, key=sg_hop_id):
nffg.add_sglink(src, dst, id=sg_hop_id, flowclass=flowclass,
bandwidth=bandwidth, delay=delay,
constraints=constraints,
additional_actions=additional_actions)
# causes unnecessary failures, when bandwidth or delay is missing
# somewhere
# else:
# sg_hop = nffg.network[src.node.id][dst.node.id][sg_hop_id]
      #   NFFGToolBox._check_flow_consistency(sg_map, sg_hop)
return nffg
@staticmethod
def retrieve_and_purge_all_tag_info (nffg):
"""
Searches all match fields of all flowrules for some possible tag values,
which may come from inter domain traffic steering and may use technology
specific tagging for a neighbouring domain.
This info is gathered for all incoming flowrule sequences and returned in
a dictionary keyed by the flowrule ID/SGHop ID. The gathered tags are
deleted from all encountered flowrules (thus the input NFFG is modified).
Tag info is also gathered from SGHops to the dictionary and consistency
is checked if needed.
:param nffg: base NFFG
:type nffg: :class:`NFFG`
:return: dict indexed by flowrule ID.
:rtype: dict
"""
# the set of tags which shall be considered. Possibly needed to modify!
possible_tag_infos = ("dl_vlan", "mpls_label")
# WARNING: we can't differentiate ethertypes based only on the presence of
# mpls_label, other fields should be processed too. But only one layer of
    # MPLS is supported in OVS currently, whose format we use in Flowrules
untag_actions = ("strip_vlan", "pop_mpls:0x8847")
tag_info_all_sghops = {}
for infra in nffg.infras:
for fr in infra.flowrules():
for match_element in fr.match.split(";"):
match_element_splitted = match_element.split("=")
if len(match_element_splitted) == 2:
if match_element_splitted[0] in possible_tag_infos:
if fr.id not in tag_info_all_sghops:
# save the tag_info
tag_info_all_sghops[fr.id] = match_element
elif tag_info_all_sghops[fr.id] != match_element:
# we have found another flowrule which has a different
# possible_tag_info.
raise RuntimeError(
"The flowrule sequence of flowrule %s in infra %s has "
"multiple match fields "
"which may be used for interdomain traffic steering ("
"%s) so it cannot be decided which one to use." %
(fr, infra.id, possible_tag_infos))
# delete this match element from the match of the flowrule
# and the ; separators too, in case they are left on the
# beginning or ending.
# we can delete this tag info from other flowrules of the same
# flowrule sequence too, because the abstract tag will be used
# during the mapping.
fr.match = fr.match.replace(match_element, ""). \
rstrip(";").lstrip(";")
# we need to gather tag_info-s from SGHops too, if flowrules are
# not present, but SGHops are. If both are present, check consistency
# between them.
for sg in nffg.sg_hops:
if sg.tag_info is not None:
if sg.id in tag_info_all_sghops:
if tag_info_all_sghops[sg.id] != sg.tag_info:
raise RuntimeError(
"Technology specific interdomain tag info is "
"inconsistent in SGHop %s tag value: %s and "
"one of its flowrules with tag value %s" %
(sg, sg.tag_info, tag_info_all_sghops[sg.id]))
else:
# add the SGHop's tag_info to the dictionary for later usage.
tag_info_all_sghops[sg.id] = sg.tag_info
# we need to check whether any tag_info is already included in flowclass
# field, if so, we need to delete it, because from now on, we take care
# of this field of the match.
if sg.flowclass is not None and sg.id in tag_info_all_sghops:
sg.flowclass = sg.flowclass.replace(tag_info_all_sghops[sg.id],
"").rstrip(";").lstrip(";")
# if the flowclass disappears, let's set it back to None
if sg.flowclass == "":
sg.flowclass = None
# we need to add the corresponding untag actions for every tag_info field.
for sg in nffg.sg_hops:
if sg.id in tag_info_all_sghops:
for tag_info, untag_action in zip(possible_tag_infos, untag_actions):
if tag_info in tag_info_all_sghops[sg.id]:
tag_info_all_sghops[sg.id] = (
tag_info_all_sghops[sg.id], untag_action)
# delete the possibly present untag action from the additional
# actions, from now on, we take care of that.
if sg.additional_actions is not None:
sg.additional_actions = sg.additional_actions. \
replace(untag_action, "").rstrip(";").lstrip(";")
# if there are no additional actions left, change it back to None
if sg.additional_actions == "":
sg.additional_actions = None
return tag_info_all_sghops
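  # Example of what retrieve_and_purge_all_tag_info() above collects and
  # strips (illustrative values): a flowrule match "in_port=1;dl_vlan=128"
  # belonging to SGHop "sg1" yields tag_info_all_sghops["sg1"] == "dl_vlan=128"
  # and the "dl_vlan=128" fragment is removed from the flowrule match; if the
  # SGHop object is present as well, the entry is later extended to the pair
  # ("dl_vlan=128", "strip_vlan").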
@staticmethod
def redirect_flowrules (from_port, to_port, infra, mark_external=False,
log=logging.getLogger("MOVE")):
"""
Redirect flowrules from `from` to `to_port` handling match/action fields.
:param from_port: origin port
:type from_port: :class:`InfraPort`
:param to_port: target port
:type to_port: :class:`InfraPort`
:param infra: container node
:type infra: :class:`NodeInfra`
:param mark_external: mark redirected flowrule as external
:type mark_external: bool
:param log: additional logger
:type log: :any:`logging.Logger`
:return: None
"""
# Flowrules pointing to the from_port -> rewrite output reference in action
for port in infra.ports:
for fr in port.flowrules:
output = fr.action.split(';', 1)[0].split('=', 1)[1]
try:
output = int(output)
except ValueError:
pass
if output == from_port.id:
# Rewrite output tag
fr.action = fr.action.replace("output=%s" % output,
"output=%s" % to_port.id, 1)
if mark_external:
fr.external = True
log.debug("Rewritten inbound flowrule: %s" % fr)
# Contained flowrules need to be rewritten and moved to the target port
for fr in from_port.flowrules:
# Rewrite in_port tag
fr.match = fr.match.replace(fr.match.split(';', 1)[0],
"in_port=%s" % to_port.id, 1)
if mark_external:
fr.external = True
# Move flowrule
to_port.flowrules.append(fr)
log.debug("Moved outbound flowrule: %s" % fr)
# Clear flowrule list
del from_port.flowrules[:]
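  # Effect of redirect_flowrules() above with illustrative port ids
  # ("ext1" -> "sap1"):
  #   rules pointing to the port:  action "output=ext1;..." -> "output=sap1;..."
  #   rules contained in the port: match "in_port=ext1;..." -> "in_port=sap1;..."
  #                                and the rule is moved to the target port.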
@classmethod
def merge_external_ports (cls, nffg, log=logging.getLogger("MERGE")):
"""
Merge detected external ports in nodes of given `nffg`
and only leave the original SAP port.
:param nffg: container node
:type nffg: :class:`NFFG`
:param log: additional logger
:type log: :any:`logging.Logger`
:return: None
"""
for infra in nffg.infras:
for ext_port in [p for p in infra.ports if p.role == "EXTERNAL"]:
log.debug("Found external port: %s" % ext_port)
# Collect ports with the same SAP tag
origin_port = [p for p in infra.ports if p.sap == ext_port.sap and
p.role != "EXTERNAL"]
if len(origin_port) != 1:
log.error("Original port for external port: %s is not found uniquely:"
" %s" % (ext_port, origin_port))
continue
origin_port = origin_port.pop()
log.debug("Detected original port for %s -> %s" % (ext_port.id,
origin_port))
# Move flowrules
log.debug("Redirect external port %s traffic into %s..."
% (ext_port, origin_port))
cls.redirect_flowrules(from_port=ext_port, to_port=origin_port,
infra=infra, mark_external=True, log=log)
# Remove external port
log.debug("Remove external SAP: %s" % ext_port.id)
nffg.del_node(node=nffg[ext_port.id])
infra.ports.remove(ext_port)
@classmethod
def isStaticInfraPort (cls, G, p):
"""
    Return true if there is a Static outbound or inbound EdgeLink, false if
    there is a Dynamic outbound or inbound link; throws an exception if both,
    or a warning if multiple of the same type.
:param G: raw networkx graph object
:type G: :class:`MultiDiGraph`
:param p: port object
:type p: :class:`Port`
:return: whether the checked port is static Infra Port
:rtype: bool
"""
static_link_found = False
dynamic_link_found = False
for edge_func, src_or_dst in ((G.out_edges_iter, 'src'),
(G.in_edges_iter, 'dst')):
for i, j, k, link in edge_func([p.node.id], data=True, keys=True):
src_or_dst_port = getattr(link, src_or_dst)
# check if we have found the right port
if src_or_dst_port.id == p.id:
if link.type == NFFG.TYPE_LINK_DYNAMIC:
dynamic_link_found = True
elif link.type == NFFG.TYPE_LINK_STATIC:
static_link_found = True
if dynamic_link_found and static_link_found:
raise RuntimeError(
"An InfraPort should either be connected to STATIC or DYNAMIC links "
"Both STATIC and DYNAMIC in/outbound links found to port %s of Infra "
"%s" % (p.id, p.node.id))
elif not dynamic_link_found and not static_link_found:
# If a port is found which is not connected to any STATIC or DYNAMIC link
return False
elif static_link_found:
return True
elif dynamic_link_found:
return False
@classmethod
def explodeGraphWithPortnodes (cls, G, id_connector_character='&'):
"""
Makes ports of the original graph into the nodes of a new NetworkX graph,
adds delay values onto edge data. The returned graph can be used by standard
networkx algorithms.
    WARNING: if called with a G which has parallel links, the link data will
    be overridden with one of the parallel links.
:param id_connector_character: character which is used to concatenate and
separate port IDs from/to node IDs
:type id_connector_character: str
:param G: raw networkx graph object
:type G: :class:`DiGraph`
:return: created graph object
:rtype: :class:`DiGraph`
"""
exploded_G = nx.DiGraph()
for id, obj in G.nodes_iter(data=True):
if obj.type == NFFG.TYPE_INFRA:
static_ports_of_infra = filter(
lambda pp, graph=G: NFFGToolBox.isStaticInfraPort(G, pp),
obj.ports)
# NOTE: obj.id == p.node.id because of iterating on obj.ports
static_ports_of_infra_global_ids = map(
lambda pp, c=id_connector_character: id_connector_character.join(
(str(pp.id), str(pp.node.id))), static_ports_of_infra)
exploded_G.add_nodes_from(static_ports_of_infra_global_ids)
# all of them should already have the weight set to non negative float
bandwidth_based_node_weight = obj.weight if hasattr(obj, 'weight') \
else 0.0
        if isinstance(obj.resources.delay, dict):
# delay is dict of dicts storing the directed distances between ports
for port1, distances in obj.resources.delay.iteritems():
for port2, dist in distances.iteritems():
exploded_G.add_edge(
id_connector_character.join((str(port1), obj.id)),
id_connector_character.join((str(port2), obj.id)),
attr_dict={'delay': dist,
'weight': bandwidth_based_node_weight})
else:
# support filling the delay matrix even if the node has only a single
# delay value, for partial backward compatibility and convenience
universal_node_delay = obj.resources.delay if obj.resources.delay \
is not None else 0.0
for i in static_ports_of_infra_global_ids:
for j in static_ports_of_infra_global_ids:
if i != j:
exploded_G.add_edge(i, j,
attr_dict={'delay': universal_node_delay,
'weight':
bandwidth_based_node_weight})
elif obj.type == NFFG.TYPE_SAP:
sap_port_found = False
for p in obj.ports:
if not sap_port_found:
exploded_G.add_node(
id_connector_character.join((str(p.id), p.node.id)))
else:
exploded_G.add_node(
id_connector_character.join((str(p.id), p.node.id)))
            # TODO: In this case multiple nodes in the exploded graph should be
# connected with 0 delay links!
# log.warn("Multiple ports found in SAP object!")
# all ports are added as nodes, and the links between the ports denoting the
# shortest paths inside the infra node are added already.
# Add links connecting infra nodes and SAPs
for i, j, link in G.edges_iter(data=True):
if link.type == NFFG.TYPE_LINK_STATIC:
# if a link delay is None, we should take it as 0ms delay.
link_delay = link.delay if link.delay is not None else 0.0
link_weight = link.weight if hasattr(link, 'weight') else 0.0
exploded_G.add_edge(
id_connector_character.join((str(link.src.id), str(i))),
id_connector_character.join((str(link.dst.id), str(j))),
attr_dict={'delay': link_delay, 'weight': link_weight,
'static_link_id': link.id})
return exploded_G
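  # Illustrative sketch of how the exploded graph is typically consumed
  # (``G`` stands for the raw networkx graph behind an NFFG; names are
  # examples only):
  #
  #   exploded = NFFGToolBox.explodeGraphWithPortnodes(G)
  #   # nodes are "<port_id>&<node_id>" strings, edges carry 'delay' and
  #   # 'weight' attributes usable by standard networkx algorithms
  #   dists = nx.all_pairs_dijkstra_path_length(exploded, weight='delay')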
@classmethod
def addOriginalNodesToExplodedGraph (cls, sources, destinations, exploded_G,
id_connector_character='&'):
"""
    Modifies the exploded_G to add the original node IDs and connects them
    with zero-weight, zero-delay links to all corresponding exploded port
    nodes. Elements of 'sources' are connected towards the graph and elements
    of 'destinations' are connected towards the original nodes. This is needed
    so we can calculate paths from an Infra node without having to decide
    which outbound port to use.
    :param sources: original node IDs to connect towards the exploded graph
    :param destinations: original node IDs to connect from the exploded graph
    :param exploded_G: exploded graph structure
    :param id_connector_character: character which is used to concatenate and
      separate port IDs from/to node IDs
    :return: the modified exploded graph
"""
# exploded_G.add_nodes_from(sources)
# exploded_G.add_nodes_from(destinations)
for i in exploded_G.nodes():
# if id_connector_character in i:
original_node_id = NFFGToolBox.try_to_convert(
i.split(id_connector_character)[1])
# the add_edge function adds the node if that is not there yet
if original_node_id in sources:
exploded_G.add_edge(original_node_id, i,
attr_dict={'delay': 0, 'weight': 0})
elif original_node_id in destinations:
exploded_G.add_edge(i, original_node_id,
attr_dict={'delay': 0, 'weight': 0})
return exploded_G
@classmethod
def purgeExplodedGraphFromOriginalNodes (cls, G, exploded_G,
id_connector_character='&'):
"""
    Deletes all original nodes from the exploded graph, together with their
    connected edges, to get back the pure exploded graph without original
    nodes.
    :param G: raw networkx graph object containing the original nodes
    :param exploded_G: exploded graph structure
    :param id_connector_character: character which is used to concatenate and
      separate port IDs from/to node IDs
    :return: the purged exploded graph
"""
for i in exploded_G.nodes():
if type(i) == str and id_connector_character in i:
i = NFFGToolBox.try_to_convert(i.split(id_connector_character)[1])
if i in G and i in exploded_G:
# removes all connected edges as well
exploded_G.remove_node(i)
return exploded_G
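  # Sketch of the intended add/compute/purge cycle (illustrative names; ``G``
  # is the raw networkx graph, ``exploded_G`` its exploded variant):
  #
  #   exploded_G = NFFGToolBox.addOriginalNodesToExplodedGraph(
  #     sources, destinations, exploded_G)
  #   paths = nx.all_pairs_dijkstra_path(exploded_G, weight='delay')
  #   exploded_G = NFFGToolBox.purgeExplodedGraphFromOriginalNodes(G, exploded_G)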
@classmethod
def extractDistsFromExploded (cls, G, exploded_dists,
id_connector_character='&'):
"""
Extracts the shortest path length matrix from the calculation result on the
exploded graph structure.
:param G: raw networkx graph object
:type G: :class:`DiGraph`
    :param exploded_dists: shortest path lengths calculated on the exploded graph
:type exploded_dists: dict
:param id_connector_character: character which is used to concatenate and
separate port IDs from/to node IDs
:type id_connector_character: str
    :return: shortest path length matrix as two dicts
:rtype: tuple
"""
dist = defaultdict(lambda: defaultdict(lambda: float('inf')))
min_dist_pairs = defaultdict(lambda: defaultdict(lambda: None))
for u, obju in G.nodes_iter(data=True):
# SAPs and Infras are handled the same at this point.
if obju.type == NFFG.TYPE_INFRA or obju.type == NFFG.TYPE_SAP:
# a list of (global_port_id, dist_dict) tuples
possible_dicts = filter(
lambda tup, original_id=u, sep=id_connector_character:
original_id == NFFGToolBox.try_to_convert(tup[0].split(sep)[1]),
exploded_dists.iteritems())
for v, objv in G.nodes_iter(data=True):
if objv.type == NFFG.TYPE_INFRA or objv.type == NFFG.TYPE_SAP:
possible_ending_nodes = filter(
lambda portid, original_id=v, sep=id_connector_character:
original_id == NFFGToolBox.try_to_convert(portid.split(sep)[1]),
exploded_dists.iterkeys())
# now we need to choose the minimum of the possible distances.
for starting_node, d in possible_dicts:
for ending_node in possible_ending_nodes:
if ending_node in d:
if d[ending_node] < dist[NFFGToolBox.try_to_convert(u)][
NFFGToolBox.try_to_convert(v)]:
dist[NFFGToolBox.try_to_convert(u)][
NFFGToolBox.try_to_convert(v)] = d[ending_node]
min_dist_pairs[u][v] = (starting_node, ending_node)
# convert default dicts to dicts for safety reasons
for k in dist:
dist[k] = dict(dist[k])
for k in min_dist_pairs:
min_dist_pairs[k] = dict(min_dist_pairs[k])
return dict(dist), dict(min_dist_pairs)
@classmethod
def extractPathsFromExploded (cls, exploded_paths_dict, min_dist_pairs,
id_connector_character='&'):
"""
Extracts and transforms paths from the matrix of shortest paths
calculated on the exploded graph structure.
:param exploded_paths_dict: exploded paths
:type exploded_paths_dict: dict
:param min_dist_pairs: minimal distance pairs
    :type min_dist_pairs: dict
:param id_connector_character: character which is used to concatenate and
separate port IDs from/to node IDs
:type id_connector_character: str
:return: extracted paths
:rtype: dict
"""
min_length_paths = defaultdict(lambda: defaultdict(lambda: None))
for original_starting_node, d in min_dist_pairs.iteritems():
for original_ending_node, tup in d.iteritems():
exploded_path = exploded_paths_dict[tup[0]][tup[1]]
# get only the exploded IDs, which come from node ID-s
path_with_only_node_ids = filter(
lambda lid, sep=id_connector_character: sep in lid, exploded_path)
# transform them back to the original ID-s
path_with_original_node_ids = map(
lambda lid, sep=id_connector_character: lid.split(sep)[1],
path_with_only_node_ids)
        # the starting and ending node IDs may not be in place
if path_with_original_node_ids[0] != original_starting_node:
path_with_original_node_ids.insert(0, original_starting_node)
if path_with_original_node_ids[-1] != original_ending_node:
path_with_original_node_ids.append(original_ending_node)
        # a transit infra appears twice in a row in the path, because there
        # was an inbound and an outbound port.
path_with_original_node_ids_no_duplicates = [
path_with_original_node_ids[0]]
for n in path_with_original_node_ids:
if n != path_with_original_node_ids_no_duplicates[-1]:
path_with_original_node_ids_no_duplicates.append(n)
path_with_original_node_ids_no_duplicates_str = map(
lambda node_id: NFFGToolBox.try_to_convert(node_id),
path_with_original_node_ids_no_duplicates)
min_length_paths[original_starting_node][original_ending_node] = \
path_with_original_node_ids_no_duplicates_str
# convert embedded default dicts
for k in min_length_paths:
min_length_paths[k] = dict(min_length_paths[k])
return dict(min_length_paths)
@classmethod
def extractPathsLinkIDsFromExplodedPath (cls, exploded_G, exploded_paths_list,
id_connector_character='&'):
"""
Extracts the static link ID-s of the given paths based on the exploded
graph. Assumes that the exploded_G generation added a 'static_link_id'
attribute to the exploded versions of static links and the paths were
calculated on the exploded graph where the original nodes are added.
    :param exploded_G: exploded graph structure with 'static_link_id' edge data
    :param exploded_paths_list: list of paths calculated on the exploded graph
    :param id_connector_character: character which is used to concatenate and
      separate port IDs from/to node IDs
    :return: lists of node IDs and link IDs, preserving the order in
      exploded_paths_list
"""
extracted_paths_list = []
extracted_path_linkids_list = []
for exploded_path in exploded_paths_list:
extracted_path = []
extracted_path_linkids = []
# the path must start from an original node!
last_node = exploded_path[0]
# integer node IDs must be converted if possible.
extracted_path.append(NFFGToolBox.try_to_convert(last_node))
for node in exploded_path[1:]:
if id_connector_character not in node and node != exploded_path[-1]:
raise RuntimeError("Inner elements of the exploded path must contain "
"the ID connector character (%s), but the path "
"is %s" % (id_connector_character, exploded_path))
elif node != exploded_path[-1]:
# integer node IDs must be converted if possible.
original_node_id = NFFGToolBox.try_to_convert(
node.split(id_connector_character)[1])
if original_node_id != extracted_path[-1]:
# this graph must have such a link, otherwise there wouldn't be a
# path
if 'static_link_id' in exploded_G[last_node][node]:
extracted_path.append(original_node_id)
extracted_path_linkids.append(NFFGToolBox.try_to_convert(
exploded_G[last_node][node]['static_link_id']))
else:
# The last node is added by the exploded path's one-before-last
# element, so this branch would be skipped anyway to avoid duplicating
# the last node element
pass
# last node must be valid in the exploded_G
last_node = node
extracted_paths_list.append(extracted_path)
extracted_path_linkids_list.append(extracted_path_linkids)
return extracted_paths_list, extracted_path_linkids_list
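  # Minimal call sketch (hypothetical names; the paths must have been computed
  # on an exploded graph still containing the original nodes, see
  # addOriginalNodesToExplodedGraph):
  #
  #   node_paths, link_id_paths = \
  #     NFFGToolBox.extractPathsLinkIDsFromExplodedPath(exploded_G, paths)
  #   # node_paths[i] holds the original node IDs of the i-th path,
  #   # link_id_paths[i] the static link IDs traversed along it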
@classmethod
def shortestPathsInLatency (cls, G, return_paths=False, exploded_G=None,
id_connector_character='&'):
"""
    Calculates shortest paths considering latencies between Infra node ports.
    Uses only the infrastructure part of an NFFG; non-Infra nodes don't have
    internal forwarding latencies.
:param G: raw networkx graph object
:type G: :class:`DiGraph`
    :param return_paths: whether to return the paths as well
    :type return_paths: bool
    :param exploded_G: optional pre-computed exploded graph to reuse
    :type exploded_G: :class:`DiGraph`
:param id_connector_character: character which is used to concatenate and
separate port IDs from/to node IDs
:type id_connector_character: str
:return: shortest path and optionally the extracted path
:rtype: dict or tuple
"""
if exploded_G is None:
exploded_G = NFFGToolBox.explodeGraphWithPortnodes(G,
id_connector_character)
exploded_dists = nx.all_pairs_dijkstra_path_length(exploded_G,
weight='delay')
dists, min_dist_pairs = NFFGToolBox.extractDistsFromExploded(G,
exploded_dists,
id_connector_character)
if return_paths:
exploded_paths = nx.all_pairs_dijkstra_path(exploded_G,
weight='delay')
paths = NFFGToolBox.extractPathsFromExploded(exploded_paths,
min_dist_pairs,
id_connector_character)
return paths, dists
else:
return dists
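  # Usage sketch (``G`` is the raw networkx graph of an NFFG; names are
  # illustrative only):
  #
  #   paths, dists = NFFGToolBox.shortestPathsInLatency(G, return_paths=True)
  #   # dists[u][v] is the lowest achievable latency between nodes u and v,
  #   # paths[u][v] the corresponding sequence of original node IDs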
@staticmethod
def strip_nfs_flowrules_sghops_ports (nffg, log):
"""
    Makes a bare NFFG object from the input by removing NFs, SG hops,
    requirement links and all flowrules.
    :param nffg: NFFG object to strip
    :param log: additional logger
    :return: the stripped NFFG
"""
# This removes most of the SGHops as well
for nf_id in [n.id for n in nffg.nfs]:
nffg.del_node(nf_id)
# Remove the remaining SGHops
for sgh in [sg for sg in nffg.sg_hops]:
nffg.del_edge(sgh.src, sgh.dst, id=sgh.id)
# Remove possible edge_reqs
for req in [r for r in nffg.reqs]:
nffg.del_edge(req.src, req.dst, id=req.id)
# Clear all flowrules
for infra in nffg.infras:
for p in infra.ports:
p.clear_flowrules()
port_deleted = False
try:
NFFGToolBox._find_infra_link(nffg, p, True, True)
except RuntimeError as re:
log.warn(
"InfraPort of %s may not have in/outbound link "
"connected to it, message: %s" % (infra.id, re.message))
infra.del_port(p.id)
port_deleted = True
if not port_deleted:
try:
NFFGToolBox._find_infra_link(nffg, p, False, True)
except RuntimeError as re:
log.warn(
"InfraPort of %s may not have in/outbound link "
"connected to it, message: %s" % (infra.id, re.message))
infra.del_port(p.id)
return nffg
| apache-2.0 | -8,901,977,784,718,506,000 | 37.507836 | 88 | 0.587949 | false | 3.663719 | false | false | false |
jonnyhuck/GlobeMaker | makeGlobe.py | 1 | 8397 | #!/usr/bin/env python
import mapnik, ogr, osr, pyproj, os, sys, getopt
from PIL import Image
###
# Draw a Rhumb line with nPoints nodes
# @author jonnyhuck
###
def getRhumb(startlong, startlat, endlong, endlat, nPoints):
# calculate distance between points
g = pyproj.Geod(ellps='WGS84')
# calculate line string along path with segments <= 1 km
lonlats = g.npts(startlong, startlat, endlong, endlat, nPoints)
# npts doesn't include start/end points, so prepend/append them and return
lonlats.insert(0, (startlong, startlat))
lonlats.append((endlong, endlat))
return lonlats
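# Example call (values are illustrative only): 100 intermediate points along a
# gore edge running from the north to the south pole at longitude -30:
#   edge = getRhumb(-30, 90, -30, -90, 100)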
###
# Write a geometry to a Shapefile
# @author jonnyhuck
###
def makeShapefile(geom, name, layer_name):
# set up the shapefile driver
driver = ogr.GetDriverByName("ESRI Shapefile")
# remove old shapefile if required
if os.path.exists(name):
driver.DeleteDataSource(name)
# create the data source
data_source = driver.CreateDataSource(name)
# create the spatial reference, WGS84
srs = osr.SpatialReference()
srs.ImportFromEPSG(4326)
# create the layer
layer = data_source.CreateLayer(layer_name, srs, ogr.wkbPolygon)
# create the feature
feature = ogr.Feature(layer.GetLayerDefn())
# Set the feature geometry using the point
feature.SetGeometry(geom)
# Create the feature in the layer (shapefile)
layer.CreateFeature(feature)
# Destroy the feature to free resources
feature.Destroy()
# Destroy the data source to free resources
data_source.Destroy()
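# Example (illustrative, mirroring how makeGore uses it): persist an ogr
# polygon to a throw-away shapefile:
#   makeShapefile(clipper, "tmp/tmp_gore0.shp", "gore")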
###
# Make a single Gore
# @author jonnyhuck
###
def makeGore(central_meridian, gore_width, number, width, gore_stroke):
# WGS84
source = osr.SpatialReference()
source.ImportFromEPSG(4326)
# Spherical Sinusoidal
original = osr.SpatialReference()
original.ImportFromProj4("+proj=sinu +lon_0=0 +x_0=0 +y_0=0 +a=6371000 +b=6371000 +units=m +no_defs ")
# Spherical Sinusoidal with gore-specific central meridian
target = osr.SpatialReference()
target.ImportFromProj4('+proj=sinu +lon_0=' + str(central_meridian) + ' +x_0=0 +y_0=0 +a=6371000 +b=6371000 +units=m +no_defs')
# get the main points of the area of interest and transform
halfWidth = gore_width / 2
mainPoints = ogr.Geometry(ogr.wkbLinearRing)
mainPoints.AddPoint(central_meridian, 90)
mainPoints.AddPoint(central_meridian - halfWidth, 0)
mainPoints.AddPoint(central_meridian, -90)
mainPoints.AddPoint(central_meridian + halfWidth, 0)
# make the gore (using mainPoints in their wgs84 form)
gore = getRhumb(mainPoints.GetX(1), mainPoints.GetY(0), mainPoints.GetX(1), mainPoints.GetY(2), 100) # get the first rhumb (N-S)
gore2 = getRhumb(mainPoints.GetX(3), mainPoints.GetY(2), mainPoints.GetX(3), mainPoints.GetY(0), 100) # get the second rhumb (S-N)
gore.extend(gore2) # combine them into one
# create ring for the gore
ring = ogr.Geometry(ogr.wkbLinearRing)
for p in gore:
ring.AddPoint(p[0], p[1])
# if invalid, do something more elegant than the fix below
# if ring.IsValid() == False:
# create polygon for the gore
clipper = ogr.Geometry(ogr.wkbPolygon)
clipper.AddGeometry(ring)
clipper.CloseRings()
# print clipper.ExportToJson()
# write to shapefile
makeShapefile(clipper, "tmp/tmp_gore" + str(number) + ".shp", "gore")
# open countries file and get all of the geometry
shapefile = "ne_110m_land/ne_110m_land.shp"
driver = ogr.GetDriverByName("ESRI Shapefile")
dataSource = driver.Open(shapefile, 0)
layer = dataSource.GetLayer()
land = ogr.Geometry(ogr.wkbGeometryCollection)
for feature in layer:
land.AddGeometry(feature.GetGeometryRef())
# clip against the gore
landPanel = clipper.Intersection(land)
# write to shapefile
makeShapefile(landPanel, "tmp/tmp_land" + str(number) + ".shp", "land")
# clean up
clipper.Destroy()
landPanel.Destroy()
# make bounding box for the output
transform = osr.CoordinateTransformation(source, original)
# points for the bounding box
bbPoints = ogr.Geometry(ogr.wkbLinearRing)
bbPoints.AddPoint(0, 90)
bbPoints.AddPoint(-halfWidth, 0)
bbPoints.AddPoint(0, -90)
bbPoints.AddPoint(halfWidth, 0)
bbPoints.Transform(transform)
# make the map
map = mapnik.Map(width, width)
map.srs = target.ExportToProj4()
map.background = mapnik.Color('#ffffff')
# add and style gore
s = mapnik.Style()
r = mapnik.Rule()
polygon_symbolizer = mapnik.PolygonSymbolizer(mapnik.Color('#000000'))
r.symbols.append(polygon_symbolizer)
s.rules.append(r)
map.append_style('land_style',s)
ds = mapnik.Shapefile(file="./tmp/tmp_land" + str(number) + ".shp")
land = mapnik.Layer('land')
land.datasource = ds
land.styles.append('land_style')
map.layers.append(land)
# add and style gore
s = mapnik.Style()
r = mapnik.Rule()
line_symbolizer = mapnik.LineSymbolizer(mapnik.Color('#000000'), gore_stroke)
r.symbols.append(line_symbolizer)
s.rules.append(r)
map.append_style('gore_style',s)
ds = mapnik.Shapefile(file="./tmp/tmp_gore" + str(number) + ".shp")
gore = mapnik.Layer('gore')
gore.datasource = ds
gore.styles.append('gore_style')
map.layers.append(gore)
# this grows the image if the map dimensions do not fit the canvas dimensions
map.aspect_fix_mode = mapnik.aspect_fix_mode.GROW_CANVAS
# Set the extent (need to set this to around 0 post transformation as this is the central meridian)
map.zoom_to_box(mapnik.Envelope(bbPoints.GetX(1), bbPoints.GetY(0), bbPoints.GetX(3), bbPoints.GetY(2)))
# render to file (and show me it)
mapnik.render_to_file(map, "tmp/gore" + str(number) + ".png")
##
# Main Function
# @author jonnyhuck
##
def main(argv):
# make sure the tmp folder exists
if not os.path.exists("tmp"):
os.makedirs("tmp")
# set defaults
GORE_WIDTH_PX = 500
GORE_WIDTH_DEG = 60
OUT_PATH = "globe.png"
GORE_OUTLINE_WIDTH = 4
# read in arguments
try:
opts, args = getopt.getopt(argv, "hp:d:g:o:")
except getopt.GetoptError:
print 'python makeGlobe.py -p [GORE_WIDTH_PX] -d [GORE_WIDTH_DEGREES] -g [GORE_OUTLINE_WIDTH] -o [OUT_PATH]'
sys.exit(2)
for opt, arg in opts:
if opt == '-h':
print 'python makeGlobe.py -p [GORE_WIDTH_PX] -d [GORE_WIDTH_DEGREES] -g [GORE_OUTLINE_WIDTH] -o [OUT_PATH]'
sys.exit()
elif opt == '-p':
GORE_WIDTH_PX = int(arg)
elif opt == '-d':
GORE_WIDTH_DEG = int(arg)
elif opt == '-g':
GORE_OUTLINE_WIDTH = int(arg)
elif opt == '-o':
OUT_PATH = arg
# verify values
if GORE_WIDTH_PX < 0:
print "invalid -p (GORE_WIDTH_PX) value: " + str(GORE_WIDTH_PX)
print "GORE_WIDTH_DEG must be >0."
sys.exit(0)
if GORE_WIDTH_DEG < 15 or GORE_WIDTH_DEG > 120 or 360 % GORE_WIDTH_DEG > 0:
print "invalid -d (GORE_WIDTH_DEG) value: " + str(GORE_WIDTH_PX)
print "GORE_WIDTH_DEG must be >=15, <=120 and multiply into 360."
print "Valid numbers include: 120, 90, 60, 30, 20, 15"
sys.exit(0)
# how many gores?
I = 360 / GORE_WIDTH_DEG
# make a test gore to see how big it is
makeGore(0, GORE_WIDTH_DEG, 666, GORE_WIDTH_PX, 0)
im666 = Image.open("tmp/gore666.png")
w,h = im666.size
# make 6 gores and join them together into a single image
# TODO: HOW CAN I WORK OUT 1497?
im = Image.new("RGB", (GORE_WIDTH_PX * I, h), "white")
for i in range(0, I):
cm = -180 + (GORE_WIDTH_DEG/2) + (GORE_WIDTH_DEG * i)
# blunt fix - stops data wrapping around the world
if i == I-1:
cm -= 0.01
print cm
makeGore(cm, GORE_WIDTH_DEG, i, GORE_WIDTH_PX, GORE_OUTLINE_WIDTH)
im1 = Image.open("tmp/gore" + str(i) + ".png")
im.paste(im1, (GORE_WIDTH_PX * i,0))
# clean up all tmp files
files = os.listdir("tmp")
for f in files:
os.remove("tmp/"+f)
# export and display
im.save(OUT_PATH)
im.show()
##
# Python nonsense...
# @author jonnyhuck
##
if __name__ == "__main__":
main(sys.argv[1:]) | lgpl-3.0 | -3,680,023,338,081,080,300 | 30.931559 | 137 | 0.640229 | false | 3.127374 | false | false | false |
rjschwei/azure-sdk-for-python | azure-mgmt-dns/azure/mgmt/dns/models/record_set.py | 1 | 4228 | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class RecordSet(Model):
"""Describes a DNS record set (a collection of DNS records with the same name
and type).
:param id: The ID of the record set.
:type id: str
:param name: The name of the record set.
:type name: str
:param type: The type of the record set.
:type type: str
:param etag: The etag of the record set.
:type etag: str
:param metadata: The metadata attached to the record set.
:type metadata: dict
:param ttl: The TTL (time-to-live) of the records in the record set.
:type ttl: long
:param arecords: The list of A records in the record set.
:type arecords: list of :class:`ARecord <azure.mgmt.dns.models.ARecord>`
:param aaaa_records: The list of AAAA records in the record set.
:type aaaa_records: list of :class:`AaaaRecord
<azure.mgmt.dns.models.AaaaRecord>`
:param mx_records: The list of MX records in the record set.
:type mx_records: list of :class:`MxRecord
<azure.mgmt.dns.models.MxRecord>`
:param ns_records: The list of NS records in the record set.
:type ns_records: list of :class:`NsRecord
<azure.mgmt.dns.models.NsRecord>`
:param ptr_records: The list of PTR records in the record set.
:type ptr_records: list of :class:`PtrRecord
<azure.mgmt.dns.models.PtrRecord>`
:param srv_records: The list of SRV records in the record set.
:type srv_records: list of :class:`SrvRecord
<azure.mgmt.dns.models.SrvRecord>`
:param txt_records: The list of TXT records in the record set.
:type txt_records: list of :class:`TxtRecord
<azure.mgmt.dns.models.TxtRecord>`
:param cname_record: The CNAME record in the record set.
:type cname_record: :class:`CnameRecord
<azure.mgmt.dns.models.CnameRecord>`
:param soa_record: The SOA record in the record set.
:type soa_record: :class:`SoaRecord <azure.mgmt.dns.models.SoaRecord>`
"""
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'etag': {'key': 'etag', 'type': 'str'},
'metadata': {'key': 'properties.metadata', 'type': '{str}'},
'ttl': {'key': 'properties.TTL', 'type': 'long'},
'arecords': {'key': 'properties.ARecords', 'type': '[ARecord]'},
'aaaa_records': {'key': 'properties.AAAARecords', 'type': '[AaaaRecord]'},
'mx_records': {'key': 'properties.MXRecords', 'type': '[MxRecord]'},
'ns_records': {'key': 'properties.NSRecords', 'type': '[NsRecord]'},
'ptr_records': {'key': 'properties.PTRRecords', 'type': '[PtrRecord]'},
'srv_records': {'key': 'properties.SRVRecords', 'type': '[SrvRecord]'},
'txt_records': {'key': 'properties.TXTRecords', 'type': '[TxtRecord]'},
'cname_record': {'key': 'properties.CNAMERecord', 'type': 'CnameRecord'},
'soa_record': {'key': 'properties.SOARecord', 'type': 'SoaRecord'},
}
def __init__(self, id=None, name=None, type=None, etag=None, metadata=None, ttl=None, arecords=None, aaaa_records=None, mx_records=None, ns_records=None, ptr_records=None, srv_records=None, txt_records=None, cname_record=None, soa_record=None):
self.id = id
self.name = name
self.type = type
self.etag = etag
self.metadata = metadata
self.ttl = ttl
self.arecords = arecords
self.aaaa_records = aaaa_records
self.mx_records = mx_records
self.ns_records = ns_records
self.ptr_records = ptr_records
self.srv_records = srv_records
self.txt_records = txt_records
self.cname_record = cname_record
self.soa_record = soa_record
| mit | -6,092,007,763,563,084,000 | 45.461538 | 248 | 0.617313 | false | 3.592184 | false | false | false |
thom-at-redhat/cfme_tests | cfme/storage/managers.py | 1 | 5166 | # -*- coding: utf-8 -*-
from functools import partial
from cfme import web_ui as ui
from cfme.web_ui.menu import nav
from cfme.exceptions import StorageManagerNotFound
from cfme.fixtures import pytest_selenium as sel
from cfme.web_ui import Form, InfoBlock, MultiFill, Region, SplitTable, fill, flash
from cfme.web_ui import form_buttons, paginator, toolbar
from utils.update import Updateable
from utils.wait import wait_for
list_page = Region(locators=dict(
managers_table=SplitTable(
header_data=("//div[@id='list_grid']/div[@class='xhdr']/table/tbody", 1),
body_data=("//div[@id='list_grid']/div[@class='objbox']/table/tbody", 1),
),
))
cfg_btn = partial(toolbar.select, "Configuration")
def _get_sm_name(o):
if isinstance(o, StorageManager):
return o.name
else:
return str(o)
def _find_and_click_sm(context):
"""Incorporates searching through the page listing and clicking in the table. Also ensures
waiting for the transition as there is no ajax hook."""
sm_name = _get_sm_name(context["storage_manager"])
for page in paginator.pages():
if sel.is_displayed("#no_records_div"):
break
if list_page.managers_table.click_cell("name", sm_name):
sel.wait_for_element("#textual_div") # No ajax wait there :(
return
raise StorageManagerNotFound("Storage manager with name '{}' not found!".format(sm_name))
nav.add_branch(
"storage_managers",
{
"storage_manager_new": lambda _: cfg_btn("Add a New Storage Manager"),
"storage_manager": [
_find_and_click_sm,
{
"storage_manager_edit": lambda _: cfg_btn("Edit this Storage Manager"),
}
]
}
)
class StorageManager(Updateable):
"""Represents the Storage / Storage Managers object. Allows interaction
Args:
        name: Name of the Storage Manager as it appears in the UI.
type: Type of the Storage Manager (eg. StorageManager.NETAPP_RS, ...)
hostname: Host name of the machine.
ip: IP Address of the machine.
port: Port of the machine.
credentials: :py:class:`dict` or :py:class:`StorageManager.Credential`
"""
class Credential(Updateable):
def __init__(self, username=None, password=None):
self.username = username
self.password = password
form = Form(fields=[
("name", ui.Input("name")),
("type", ui.Select("select#sm_type")),
("hostname", ui.Input("hostname")),
("ip", ui.Input("ipaddress")),
("port", ui.Input("port")),
("credentials", Form(fields=[
("username", ui.Input("userid")),
("password", MultiFill(
ui.Input("password"), ui.Input("verify")
))
])),
])
validate = form_buttons.FormButton("Validate the credentials by logging into the Server")
add = form_buttons.FormButton("Add this Storage Manager")
##
# Types constants. Extend if needed :)
NETAPP_RS = "NetApp Remote Service"
def __init__(self, name=None, type=None, hostname=None, ip=None, port=None, credentials=None):
self.name = name
self.type = type
self.hostname = hostname
self.ip = ip
self.port = port
self.credentials = credentials
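    # Rough usage sketch (names and credentials below are placeholders, not
    # taken from any real environment):
    #
    #   sm = StorageManager(name="netapp-01", type=StorageManager.NETAPP_RS,
    #                       hostname="netapp.example.com", ip="10.0.0.5",
    #                       port="443",
    #                       credentials=StorageManager.Credential(
    #                           username="admin", password="secret"))
    #   sm.create()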
def create(self, validate=True, cancel=False):
sel.force_navigate("storage_manager_new")
fill(self.form, self)
if validate:
sel.click(self.validate)
if cancel:
sel.click(form_buttons.cancel)
else:
sel.click(self.add)
flash.assert_no_errors()
def update(self, updates, validate=True, cancel=False):
sel.force_navigate("storage_manager_edit", context={"storage_manager": self})
fill(self.form, updates)
if validate:
sel.click(self.validate)
if cancel:
sel.click(form_buttons.cancel)
else:
sel.click(form_buttons.save)
flash.assert_no_errors()
def delete(self, cancel=False):
self.navigate()
cfg_btn("Remove this Storage Manager from the VMDB", invokes_alert=True)
sel.handle_alert(cancel)
flash.assert_no_errors()
def navigate(self):
sel.force_navigate("storage_manager", context={"storage_manager": self})
def refresh_inventory(self):
self.navigate()
cfg_btn("Refresh Inventory", invokes_alert=True)
sel.handle_alert(cancel=False)
flash.assert_no_errors()
def refresh_status(self):
self.navigate()
cfg_btn("Refresh Status", invokes_alert=True)
sel.handle_alert(cancel=False)
flash.assert_no_errors()
def wait_until_updated(self, num_sec=300):
def _wait_func():
self.navigate()
return InfoBlock("Properties", "Last Update Status").text.strip().lower() == "ok"
wait_for(_wait_func, num_sec=num_sec, delay=5)
@property
def exists(self):
try:
self.navigate()
return True
except StorageManagerNotFound:
return False
| gpl-2.0 | -8,675,807,006,659,604,000 | 32.115385 | 98 | 0.609756 | false | 3.934501 | false | false | false |
movio/maxwell-faker | setup.py | 1 | 1173 | # -*- coding: utf-8 -*-
from setuptools import setup, find_packages
setup(
name='maxwell-faker',
version='0.1.0',
description='Maxwell faker for systems and load testing',
url='https://github.com/movio/maxwell-faker',
author='Nicolas Maquet and Nan Wu',
author_email='nicolas@movio.co, nan@movio.co',
license='MIT',
classifiers=[
'Development Status :: 2 - Pre-Alpha',
'Intended Audience :: Developers',
'Intended Audience :: System Administrators',
'Topic :: Database',
        'Topic :: Software Development :: Testing',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
],
keywords='maxwell faker fake data generator json kafka mysql',
packages=find_packages(exclude=['tests']),
install_requires=[
'PyYAML',
'kafka-python'
],
entry_points={
'console_scripts': [
'maxwell-faker=maxwell_faker:daemon_main',
'maxwell-faker-bootstrap=maxwell_faker:bootstrap_main',
'maxwell-faker-gen=maxwell_faker:gen_main',
],
},
)
| mit | -8,835,322,440,469,028,000 | 31.583333 | 67 | 0.604433 | false | 3.796117 | false | false | false |
gdelnegro/django-translation-server | translation_server/forms.py | 1 | 1927 | # -*- coding: utf-8 -*-
# Created by Gustavo Del Negro <gustavodelnegro@gmail.com> on 9/30/16.
from django import forms
from django.utils.translation import ugettext_lazy as _
from django.conf import settings
from translation_server.models import *
from django.urls import reverse
class TranslationAdminForm(forms.ModelForm):
languages_list = [lang[0].replace('-', '_') for lang in settings.LANGUAGES]
translations_url = forms.CharField(max_length=200, widget=forms.HiddenInput())
translation_type_url = forms.CharField(max_length=200, widget=forms.HiddenInput())
last_translation_tag_url = forms.CharField(max_length=200, widget=forms.HiddenInput())
def __init__(self, *args, **kwargs):
instance = kwargs.get('instance', None)
kwargs.update(initial={
'translations_url': reverse('api:translations-list'),
'translation_type_url': reverse('api:translations_types-list'),
'last_translation_tag_url': reverse('get_last_translation_tag', args=[0])[:-1],
})
super(TranslationAdminForm, self).__init__(*args, **kwargs)
def clean(self):
cleaned_data = super(TranslationAdminForm, self).clean()
for language in self.languages_list:
if cleaned_data.get("text_"+language) == cleaned_data.get('auxiliary_text_'+language):
self.add_error('auxiliary_text_' + language,
forms.ValidationError(_('DTSE1')))
return cleaned_data
def save(self, commit=False):
translation = super(TranslationAdminForm, self).save(commit=commit)
translation.save()
translation.migration_created = False
translation.save()
return translation
class Meta:
model = Translation
fields = "__all__"
class TranslationTypeAdminForm(forms.ModelForm):
class Meta:
model = TranslationType
fields = "__all__" | mit | 3,482,051,199,576,771,000 | 37.56 | 98 | 0.654385 | false | 4.153017 | false | false | false |
pirata-cat/mieli | geo/helpers.py | 1 | 2853 | # -*- coding: utf-8 -*-
from django.utils.translation import ugettext_lazy as _
from geo.models import Geoname
from geo.api import location
from mieli.api import nexus
from django import forms
import re
def __admin2_sanitizer(admin2):
admin2.name = admin2.name.split(' ')[-1]
return admin2
def build_administrative_2_divisions(admin1_code):
admin2_divisions = map(__admin2_sanitizer, location.load_administrative_2_divisions(admin1_code.countrycode, admin1_code.admin1_code))
return [('', '')] + sorted(map(lambda x: (x.geonameid, x.name[0].upper() + x.name[1:]), admin2_divisions), key=lambda x: x[1])
def build_places(admin1_code):
places = location.load_places(admin1_code.countrycode, admin1_code.admin1_code)
return [('', '')] + sorted(map(lambda x: (x.geonameid, x.name[0].upper() + x.name[1:]), places), key=lambda x: x[1])
def set_extra_fields(**kwargs):
form = kwargs['form']
form.initial['administrative_division'] = ''
form.initial['place'] = ''
fields = form.fields
catalonia = filter(lambda x: x.name == 'Catalonia', location.load_administrative_divisions('ES'))[0]
fields['administrative_division'] = forms.ChoiceField(label=_('Província'), choices=build_administrative_2_divisions(catalonia))
fields['place'] = forms.ChoiceField(label=_('Municipi'), choices=build_places(catalonia))
return kwargs
def clean_extra_fields(form, **kwargs):
if not 'administrative_division' in form.cleaned_data:
form.add_error('administrative_division', _('Indica una província'))
return
if not 'place' in form.cleaned_data:
form.add_error('place', _('Indica un municipi'))
return
if form.cleaned_data['administrative_division'] == '':
form.add_error('administrative_division', _('Indica una província'))
return
if form.cleaned_data['place'] == '':
form.add_error('place', _('Indica un municipi'))
return
def on_user_creation(user, **kwargs):
if 'location' in kwargs:
if kwargs['location'] == None:
if 'place' in kwargs:
if kwargs['place'] == '':
raise Exception('Place missing')
kwargs['location'] = kwargs['place']
else:
return
else:
if not 'place' in kwargs:
return
kwargs['location'] = kwargs['place']
place = Geoname.objects.get(geonameid=kwargs['location'])
administrative_division_id = None
if 'administrative_division' in kwargs:
administrative_division_id = kwargs['administrative_division']
l = location.save(user, place, administrative_division_id)
#nexus_ = nexus.get(name=l.admin2.name.split(' ')[-1])
#if nexus_ == None:
# raise Exception("Nexus not found for '%s'" % l.admin2.name)
#nexus_.join(user)
| agpl-3.0 | 7,802,266,255,789,061,000 | 42.227273 | 139 | 0.645636 | false | 3.39239 | false | false | false |
heromod/migrid | mig/server/edituser.py | 1 | 4234 | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# --- BEGIN_HEADER ---
#
# edituser - Edit a MiG user
# Copyright (C) 2003-2013 The MiG Project lead by Brian Vinter
#
# This file is part of MiG.
#
# MiG is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# MiG is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# -- END_HEADER ---
#
"""Edit MiG user in user database and file system"""
import getopt
import os
import sys
from shared.useradm import init_user_adm, edit_user
def usage(name='edituser.py'):
"""Usage help"""
print """Edit existing user in MiG user database and file system.
Usage:
%(name)s [OPTIONS] -i USER_ID [FULL_NAME] [ORGANIZATION] [STATE] [COUNTRY] \
[EMAIL] [COMMENT] [PASSWORD]
Where OPTIONS may be one or more of:
-c CONF_FILE Use CONF_FILE as server configuration
-d DB_FILE Use DB_FILE as user data base file
-f Force operations to continue past errors
-h Show this help
-i CERT_DN CERT_DN of user to edit
-v Verbose output
"""\
% {'name': name}
# ## Main ###
if '__main__' == __name__:
(args, app_dir, db_path) = init_user_adm()
conf_path = None
force = False
verbose = False
user_id = None
user_dict = {}
opt_args = 'c:d:fhi:v'
try:
(opts, args) = getopt.getopt(args, opt_args)
except getopt.GetoptError, err:
print 'Error: ', err.msg
usage()
sys.exit(1)
for (opt, val) in opts:
if opt == '-c':
conf_path = val
elif opt == '-d':
db_path = val
elif opt == '-f':
force = True
elif opt == '-h':
usage()
sys.exit(0)
elif opt == '-i':
user_id = val
elif opt == '-v':
verbose = True
else:
print 'Error: %s not supported!' % opt
if conf_path and not os.path.isfile(conf_path):
print 'Failed to read configuration file: %s' % conf_path
sys.exit(1)
if verbose:
if conf_path:
print 'using configuration in %s' % conf_path
else:
print 'using configuration from MIG_CONF (or default)'
if not user_id:
print 'Error: Existing user ID is required'
usage()
sys.exit(1)
if args:
try:
user_dict['full_name'] = args[0]
user_dict['organization'] = args[1]
user_dict['state'] = args[2]
user_dict['country'] = args[3]
user_dict['email'] = args[4]
except IndexError:
# Ignore missing optional arguments
pass
else:
print 'Please enter the new details for %s:' % user_id
print '[enter to skip field]'
user_dict['full_name'] = raw_input('Full Name: ').title()
user_dict['organization'] = raw_input('Organization: ')
user_dict['state'] = raw_input('State: ')
user_dict['country'] = raw_input('2-letter Country Code: ')
user_dict['email'] = raw_input('Email: ')
# Remove empty value fields
for (key, val) in user_dict.items():
if not val:
del user_dict[key]
if verbose:
print 'Update DB entry and dirs for %s: %s' % (user_id, user_dict)
try:
user = edit_user(user_id, user_dict, conf_path, db_path, force,
verbose)
except Exception, err:
print err
sys.exit(1)
print '%s\nchanged to\n%s\nin user database and file system' % \
(user_id, user['distinguished_name'])
print
print 'Please revoke/reissue any related certificates!'
| gpl-2.0 | 7,567,648,195,852,900,000 | 28.816901 | 81 | 0.577232 | false | 3.704287 | false | false | false |
apache/cloudstack-ec2stack | tests/password_tests.py | 3 | 2316 | #!/usr/bin/env python
# encoding: utf-8
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
import mock
from ec2stack.helpers import read_file, generate_signature
from . import Ec2StackAppTestCase
class PasswordTestCase(Ec2StackAppTestCase):
def test_get_password_data(self):
data = self.get_example_data()
data['Action'] = 'GetPasswordData'
data['InstanceId'] = 'Test'
data['Signature'] = generate_signature(data, 'POST', 'localhost', '/')
get = mock.Mock()
get.return_value.text = read_file(
'tests/data/valid_instance_get_password.json'
)
get.return_value.status_code = 200
with mock.patch('requests.get', get):
response = self.post(
'/',
data=data
)
self.assert_ok(response)
assert 'GetPasswordDataResponse' in response.data
def test_invalid_get_password(self):
data = self.get_example_data()
data['Action'] = 'GetPasswordData'
data['InstanceId'] = 'Test'
data['Signature'] = generate_signature(data, 'POST', 'localhost', '/')
get = mock.Mock()
get.return_value.text = read_file(
'tests/data/invalid_instance_get_password.json'
)
get.return_value.status_code = 431
with mock.patch('requests.get', get):
response = self.post(
'/',
data=data
)
self.assert_bad_request(response)
assert 'InvalidInstanceId.NotFound' in response.data
| apache-2.0 | 5,318,389,458,403,325,000 | 32.085714 | 78 | 0.64076 | false | 3.986231 | true | false | false |
versae/ortograbot | bot.py | 1 | 6753 | # -*- coding: utf-8 -*-
import langid
import logging
import pymongo
import os
import re
import sys
import twitter
import urllib
from datetime import datetime
from datetime import timedelta
from random import choice
from random import randint
logging.basicConfig()
logger = logging.getLogger(__name__)
class OrtograBot(object):
"""
OrtograBot searches for certain orthographic errors on twitter and reports
back to the user with the proper form.
"""
def __init__(self, mongodb_url=None):
"""Setup MongoDB databse, Twitter API and rules"""
mongodb_url = os.environ.get("MONGOHQ_URL", mongodb_url)
self.debug = bool(os.environ.get("DEBUG", True))
client = pymongo.MongoClient(mongodb_url)
self.db = client[mongodb_url.rsplit("/", 1)[1]]
credentials = self.db.twitterCredentials.find_one()
self.username = credentials["username"]
self.api = twitter.Api(
consumer_key=credentials["consumer_key"],
consumer_secret=credentials["consumer_secret"],
access_token_key=credentials["access_token_key"],
access_token_secret=credentials["access_token_secret"]
)
self.rules = [
{
"search": u"tí",
"message": u"ti nunca lleva tilde → "
u"http://buscon.rae.es/dpd/?key=ti&origen=REDPD",
"lang": u"es",
},
{
"search": u"cuidate",
"message": u"cuídate es esdrújula, "
u"por lo que siempre lleva tilde → "
u"http://buscon.rae.es/dpd/?key=tilde#113",
"lang": u"es",
},
{
"search": u"corazon",
"message": u"corazón es aguda acabada en -n, "
u"por lo que siempre lleva tilde → "
u"http://buscon.rae.es/dpd/?key=tilde#111",
"lang": u"es",
},
{
"search": u"bicep",
"message": u"la palabra «bicep» no existe, "
u"es bíceps, llana y con tilde por acabar en -s "
u"precedida de consonante → "
u"http://lema.rae.es/dpd/?key=b%C3%ADceps",
"lang": u"es",
},
{
"search": u"biceps",
"message": u"bíceps es llana y acabada en -s "
u"precedida de consonante, "
u"por lo que siempre lleva tilde → "
u"http://lema.rae.es/dpd/?key=b%C3%ADceps",
"lang": u"es",
}
]
self.punctuation = re.compile(r"[ \.,\?\!¡¿\n\t\-]+")
self.emojis = [
u"🐭", u"🐮", u"🐱", u"🐵", u"😁", u"😂", u"😃", u"😄", u"😅",
u"😆", u"😇", u"😈", u"😉", u"😊", u"😋", u"😌", u"😍", u"😎",
u"😏", u"😰", u"😱", u"😲", u"😳", u""]
def run_rule(self):
"""Run one random rule and reply to the twitter user if needed"""
rule = choice(self.rules)
# HACK: Using quote_plus and encode to fix a bug in python-twitter
# search function
search = urllib.quote_plus(rule["search"].encode("utf-8"))
results = self.api.GetSearch(search)
for status_obj in results:
text_lower = status_obj.text.lower()
if (rule["search"] not in self.punctuation.split(text_lower)
or self.username.lower() in text_lower
or status_obj.in_reply_to_status_id
or status_obj.retweeted
or langid.classify(status_obj.text)[0] != rule["lang"]):
continue
# To guarantee some human-like behaviour,
# it only replies 25% of the time
if randint(1, 100) > 75:
# The 75% remaining, just tweet random messages
if not self.debug:
try:
if randint(1, 100) > 85:
# 85% from the message of the rule
message = u"Recuerda: {} {}".format(
rule["message"],
choice(self.emojis)
)
# Add a random emoji icon to messages to avoid
# duplicated statuses
self.api.PostUpdate(message)
else:
                            # the remaining ~85%: a friendly message
message = (u"Soy ortolibán, "
u"tu corrector ortográfico "
u"amigo {}".format(choice(self.emojis)))
self.api.PostUpdate(message)
except Exception:
logger.error("Unexpected error: %s",
sys.exc_info()[0:2])
continue
post_time = datetime.strptime(status_obj.created_at,
'%a %b %d %H:%M:%S +0000 %Y')
now = datetime.utcnow()
one_day_ago = now - timedelta(days=1)
reply_to = {
"status_id": status_obj.id,
"screen_name": status_obj.user.screen_name,
"post_time": post_time,
"text": status_obj.text,
"reply_time": now,
"search": rule["search"],
"lang": rule["lang"],
"place": status_obj.place,
"coordinates": status_obj.coordinates,
}
user_already_messaged = self.db.messaged.find_one({
"screen_name": reply_to["screen_name"],
"search": rule["search"],
"lang": rule["lang"],
"reply_time": {"$gte": one_day_ago}
})
if not user_already_messaged:
try:
reply_message = u"@{} {}".format(reply_to["screen_name"],
rule["message"])
if not self.debug:
self.api.PostUpdate(
reply_message,
in_reply_to_status_id=status_obj.id
)
self.db.messaged.insert(reply_to, safe=True)
# We only reply to one user
break
except Exception:
logger.error("Unexpected error: %s", sys.exc_info()[0:2])
| mit | -7,809,110,081,855,427,000 | 41.433121 | 79 | 0.448064 | false | 3.900468 | false | false | false |
LarsFronius/ansible | lib/ansible/modules/network/illumos/ipadm_prop.py | 43 | 7530 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2015, Adam Števko <adam.stevko@gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
ANSIBLE_METADATA = {'metadata_version': '1.0',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: ipadm_prop
short_description: Manage protocol properties on Solaris/illumos systems.
description:
- Modify protocol properties on Solaris/illumos systems.
version_added: "2.2"
author: Adam Števko (@xen0l)
options:
protocol:
description:
        - Specifies the protocol for which we want to manage properties.
required: true
property:
description:
- Specifies the name of property we want to manage.
required: true
value:
description:
- Specifies the value we want to set for the property.
required: false
temporary:
description:
- Specifies that the property value is temporary. Temporary
property values do not persist across reboots.
required: false
default: false
choices: [ "true", "false" ]
state:
description:
- Set or reset the property value.
required: false
default: present
choices: [ "present", "absent", "reset" ]
'''
EXAMPLES = '''
# Set TCP receive buffer size
ipadm_prop: protocol=tcp property=recv_buf value=65536
# Reset UDP send buffer size to the default value
ipadm_prop: protocol=udp property=send_buf state=reset
'''
RETURN = '''
protocol:
description: property's protocol
returned: always
type: string
sample: "TCP"
property:
description: name of the property
returned: always
type: string
sample: "recv_maxbuf"
state:
description: state of the target
returned: always
type: string
sample: "present"
temporary:
description: property's persistence
returned: always
type: boolean
sample: "True"
value:
description: value of the property. May be int or string depending on property.
returned: always
type: int
sample: "'1024' or 'never'"
'''
SUPPORTED_PROTOCOLS = ['ipv4', 'ipv6', 'icmp', 'tcp', 'udp', 'sctp']
class Prop(object):
def __init__(self, module):
self.module = module
self.protocol = module.params['protocol']
self.property = module.params['property']
self.value = module.params['value']
self.temporary = module.params['temporary']
self.state = module.params['state']
def property_exists(self):
cmd = [self.module.get_bin_path('ipadm')]
cmd.append('show-prop')
cmd.append('-p')
cmd.append(self.property)
cmd.append(self.protocol)
(rc, _, _) = self.module.run_command(cmd)
if rc == 0:
return True
else:
self.module.fail_json(msg='Unknown property "%s" for protocol %s' %
(self.property, self.protocol),
protocol=self.protocol,
property=self.property)
def property_is_modified(self):
cmd = [self.module.get_bin_path('ipadm')]
cmd.append('show-prop')
cmd.append('-c')
cmd.append('-o')
cmd.append('current,default')
cmd.append('-p')
cmd.append(self.property)
cmd.append(self.protocol)
(rc, out, _) = self.module.run_command(cmd)
out = out.rstrip()
(value, default) = out.split(':')
if rc == 0 and value == default:
return True
else:
return False
def property_is_set(self):
cmd = [self.module.get_bin_path('ipadm')]
cmd.append('show-prop')
cmd.append('-c')
cmd.append('-o')
cmd.append('current')
cmd.append('-p')
cmd.append(self.property)
cmd.append(self.protocol)
(rc, out, _) = self.module.run_command(cmd)
out = out.rstrip()
if rc == 0 and self.value == out:
return True
else:
return False
def set_property(self):
cmd = [self.module.get_bin_path('ipadm')]
cmd.append('set-prop')
if self.temporary:
cmd.append('-t')
cmd.append('-p')
cmd.append(self.property + "=" + self.value)
cmd.append(self.protocol)
return self.module.run_command(cmd)
def reset_property(self):
cmd = [self.module.get_bin_path('ipadm')]
cmd.append('reset-prop')
if self.temporary:
cmd.append('-t')
cmd.append('-p')
cmd.append(self.property)
cmd.append(self.protocol)
return self.module.run_command(cmd)
def main():
module = AnsibleModule(
argument_spec=dict(
protocol=dict(required=True, choices=SUPPORTED_PROTOCOLS),
property=dict(required=True),
value=dict(required=False),
temporary=dict(default=False, type='bool'),
state=dict(
default='present', choices=['absent', 'present', 'reset']),
),
supports_check_mode=True
)
prop = Prop(module)
rc = None
out = ''
err = ''
result = {}
result['protocol'] = prop.protocol
result['property'] = prop.property
result['state'] = prop.state
result['temporary'] = prop.temporary
if prop.value:
result['value'] = prop.value
if prop.state == 'absent' or prop.state == 'reset':
if prop.property_exists():
if not prop.property_is_modified():
if module.check_mode:
module.exit_json(changed=True)
(rc, out, err) = prop.reset_property()
if rc != 0:
module.fail_json(protocol=prop.protocol,
property=prop.property,
msg=err,
rc=rc)
elif prop.state == 'present':
if prop.value is None:
module.fail_json(msg='Value is mandatory with state "present"')
if prop.property_exists():
if not prop.property_is_set():
if module.check_mode:
module.exit_json(changed=True)
(rc, out, err) = prop.set_property()
if rc != 0:
module.fail_json(protocol=prop.protocol,
property=prop.property,
msg=err,
rc=rc)
if rc is None:
result['changed'] = False
else:
result['changed'] = True
if out:
result['stdout'] = out
if err:
result['stderr'] = err
module.exit_json(**result)
from ansible.module_utils.basic import *
if __name__ == '__main__':
main()
| gpl-3.0 | 8,330,504,288,972,269,000 | 26.778598 | 83 | 0.563762 | false | 4.11591 | false | false | false |
koorukuroo/networkx_for_unicode | networkx/linalg/spectrum.py | 10 | 2052 | """
Eigenvalue spectrum of graphs.
"""
# Copyright (C) 2004-2013 by
# Aric Hagberg <hagberg@lanl.gov>
# Dan Schult <dschult@colgate.edu>
# Pieter Swart <swart@lanl.gov>
# All rights reserved.
# BSD license.
import networkx as nx
__author__ = "\n".join(['Aric Hagberg <aric.hagberg@gmail.com>',
'Pieter Swart (swart@lanl.gov)',
'Dan Schult(dschult@colgate.edu)'])
__all__ = ['laplacian_spectrum', 'adjacency_spectrum']
def laplacian_spectrum(G, weight='weight'):
"""Return eigenvalues of the Laplacian of G
Parameters
----------
G : graph
A NetworkX graph
weight : string or None, optional (default='weight')
The edge data key used to compute each value in the matrix.
If None, then each edge has weight 1.
Returns
-------
evals : NumPy array
Eigenvalues
Notes
-----
For MultiGraph/MultiDiGraph, the edges weights are summed.
See to_numpy_matrix for other options.
See Also
--------
laplacian_matrix
"""
from scipy.linalg import eigvalsh
return eigvalsh(nx.laplacian_matrix(G,weight=weight).todense())
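# Minimal usage sketch (eigenvalues shown are approximate):
#
#   >>> import networkx as nx
#   >>> laplacian_spectrum(nx.path_graph(4))
#   # -> roughly [0.0, 0.586, 2.0, 3.414]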
def adjacency_spectrum(G, weight='weight'):
"""Return eigenvalues of the adjacency matrix of G.
Parameters
----------
G : graph
A NetworkX graph
weight : string or None, optional (default='weight')
The edge data key used to compute each value in the matrix.
If None, then each edge has weight 1.
Returns
-------
evals : NumPy array
Eigenvalues
Notes
-----
For MultiGraph/MultiDiGraph, the edges weights are summed.
See to_numpy_matrix for other options.
See Also
--------
adjacency_matrix
"""
from scipy.linalg import eigvals
return eigvals(nx.adjacency_matrix(G,weight=weight).todense())
# fixture for nose tests
def setup_module(module):
from nose import SkipTest
try:
import scipy.linalg
except:
raise SkipTest("scipy.linalg not available")
| bsd-3-clause | -2,793,521,151,012,232,700 | 24.02439 | 67 | 0.624269 | false | 3.864407 | false | false | false |
3324fr/spinalcordtoolbox | testing/testing_sct_propseg.py | 1 | 10478 | #!/usr/bin/env python
#########################################################################################
#
# Test function sct_propseg
#
# ---------------------------------------------------------------------------------------
# Copyright (c) 2014 Polytechnique Montreal <www.neuro.polymtl.ca>
# Author: Augustin Roux
# modified: 2014/10/09
#
# About the license: see the file LICENSE.TXT
#########################################################################################
#import sct_utils as sct
import commands
import shutil
import getopt
import sys
import time
import sct_utils as sct
import os
import nibabel
import numpy as np
import math
from tabulate import tabulate
class param:
def __init__(self):
self.download = 0
self.remove_tmp_file = 0
self.verbose = 1
self.url_git = 'https://github.com/benjamindeleener/PropSeg_data.git'
self.path_data = '/home/django/benjamindeleener/data/PropSeg_data/'
param.data = ['t1','t2','dmri']
def main():
# Check input parameters
try:
opts, args = getopt.getopt(sys.argv[1:],'h:d:p:r:t:')
except getopt.GetoptError:
usage()
for opt, arg in opts:
if opt == '-h':
usage()
sys.exit(0)
if opt == '-d':
param.download = int(arg)
if opt == '-p':
param.path_data = arg
if opt == '-t':
if ',' in arg:
param.data = arg.split(',')
else:
param.data = arg
if opt == '-r':
param.remove_tmp_file = int(arg)
print param.data
start_time = time.time()
# download data
if param.download:
sct.printv('\nDownloading testing data...', param.verbose)
# remove data folder if exist
if os.path.exists('PropSeg_data'):
sct.printv('WARNING: PropSeg_data already exists. Removing it...', param.verbose, 'warning')
sct.run('rm -rf PropSeg_data')
# clone git repos
sct.run('git clone '+param.url_git)
# update path_data field
param.path_data = 'PropSeg_data'
# get absolute path and add slash at the end
param.path_data = sct.slash_at_the_end(os.path.abspath(param.path_data), 1)
# segment all data in t1 folder
results_t1 = []
sum_old,sum_new = 0,0
if 't1' in param.data:
for dirname in os.listdir(param.path_data+"t1/"):
if dirname not in ['._.DS_Store','.DS_Store']:
for filename in os.listdir(param.path_data+"t1/"+dirname):
if filename.startswith('t1') and not filename.endswith('_seg.nii.gz') and not filename.endswith('_detection.nii.gz') and not filename.endswith('.vtk'):
print dirname, filename
[d_old,d_new],[r_old,r_new] = segmentation(param.path_data+"t1/"+dirname+"/"+filename,param.path_data+"t1/"+dirname+"/",'t1')
if d_old == 0:
d_old = 'OK'
sum_old = sum_old+1
else: d_old = 'Not In'
if d_new == 0:
d_new = 'OK'
sum_new = sum_new+1
else: d_new = 'Not In'
results_t1.append([dirname,d_old,d_new,round(r_old,2),round(r_new,2)])
# compute average
results_t1.append(['average',sum_old,sum_new,np.mean([line[3] for line in results_t1]),np.mean([line[4] for line in results_t1])])
# segment all data in t2 folder
results_t2 = []
sum_old,sum_new = 0,0
if 't2' in param.data:
for dirname in os.listdir(param.path_data+"t2/"):
if dirname not in ['._.DS_Store','.DS_Store']:
for filename in os.listdir(param.path_data+"t2/"+dirname):
if filename.startswith('t2_') and not filename.endswith('_seg.nii.gz') and not filename.endswith('_detection.nii.gz') and not filename.endswith('.vtk'):
print dirname, filename
[d_old,d_new],[r_old,r_new] = segmentation(param.path_data+"t2/"+dirname+"/"+filename,param.path_data+"t2/"+dirname+"/",'t2')
if d_old == 0:
d_old = 'OK'
sum_old = sum_old+1
else: d_old = 'Not In'
if d_new == 0:
d_new = 'OK'
sum_new = sum_new+1
else: d_new = 'Not In'
results_t2.append([dirname,d_old,d_new,round(r_old,2),round(r_new,2)])
# compute average
results_t2.append(['average',sum_old,sum_new,np.mean([line[3] for line in results_t2]),np.mean([line[4] for line in results_t2])])
results_dmri = []
sum_old,sum_new = 0,0
if 'dmri' in param.data:
for dirname in os.listdir(param.path_data+"dmri/"):
if dirname not in ['._.DS_Store','.DS_Store']:
for filename in os.listdir(param.path_data+"dmri/"+dirname):
if filename.startswith('dmri') and not filename.endswith('_seg.nii.gz') and not filename.endswith('_detection.nii.gz') and not filename.endswith('.vtk'):
print dirname, filename
[d_old,d_new],[r_old,r_new] = segmentation(param.path_data+"dmri/"+dirname+"/"+filename,param.path_data+"dmri/"+dirname+"/",'t1')
if d_old == 0:
d_old = 'OK'
sum_old = sum_old+1
else: d_old = 'Not In'
if d_new == 0:
d_new = 'OK'
sum_new = sum_new+1
else: d_new = 'Not In'
results_dmri.append([dirname,d_old,d_new,round(r_old,2),round(r_new,2)])
# compute average
results_dmri.append(['average',sum_old,sum_new,np.mean([line[3] for line in results_dmri]),np.mean([line[4] for line in results_dmri])])
if 't1' in param.data:
print ''
print tabulate(results_t1, headers=["Subject-T1","Detect-old","Detect-new","DC-old", "DC-new"], floatfmt=".2f")
if 't2' in param.data:
print ''
print tabulate(results_t2, headers=["Subject-T2","Detect-old","Detect-new","DC-old", "DC-new"], floatfmt=".2f")
if 'dmri' in param.data:
print ''
print tabulate(results_dmri, headers=["Subject-dmri","Detect-old","Detect-new","DC-old", "DC-new"], floatfmt=".2f")
# display elapsed time
elapsed_time = time.time() - start_time
print 'Finished! Elapsed time: '+str(int(round(elapsed_time)))+'s\n'
# remove temp files
if param.remove_tmp_file:
sct.printv('\nRemove temporary files...', param.verbose)
sct.run('rm -rf '+param.path_tmp, param.verbose)
e = 0
for i in range(0,len(results_t2)):
if (results_t2[i][4] < 0.8 or results_t2[i][4] < results_t2[i][3]):
e = e+1
sys.exit(e)
def segmentation(fname_input, output_dir, image_type):
# parameters
path_in, file_in, ext_in = sct.extract_fname(fname_input)
segmentation_filename_old = path_in + 'old/' + file_in + '_seg' + ext_in
manual_segmentation_filename_old = path_in + 'manual_' + file_in + ext_in
detection_filename_old = path_in + 'old/' + file_in + '_detection' + ext_in
segmentation_filename_new = path_in + 'new/' + file_in + '_seg' + ext_in
manual_segmentation_filename_new = path_in + 'manual_' + file_in + ext_in
detection_filename_new = path_in + 'new/' + file_in + '_detection' + ext_in
# initialize results of segmentation and detection
results_detection = [0,0]
results_segmentation = [0.0,0.0]
# perform PropSeg old version
sct.run('rm -rf '+output_dir+'old')
sct.create_folder(output_dir+'old')
cmd = 'sct_propseg_old -i ' + fname_input \
+ ' -o ' + output_dir+'old' \
+ ' -t ' + image_type \
+ ' -detect-nii'
sct.printv(cmd)
status_propseg_old, output_propseg_old = commands.getstatusoutput(cmd)
sct.printv(output_propseg_old)
# check if spinal cord is correctly detected with old version of PropSeg
cmd = "isct_check_detection.py -i "+detection_filename_old+" -t "+manual_segmentation_filename_old
sct.printv(cmd)
status_detection_old, output_detection_old = commands.getstatusoutput(cmd)
sct.printv(output_detection_old)
results_detection[0] = status_detection_old
# compute Dice coefficient for old version of PropSeg
cmd_validation = 'sct_dice_coefficient '+segmentation_filename_old \
+ ' '+manual_segmentation_filename_old \
+ ' -bzmax'
sct.printv(cmd_validation)
status_validation_old, output_validation_old = commands.getstatusoutput(cmd_validation)
print output_validation_old
res = output_validation_old.split()[-1]
if res != 'nan': results_segmentation[0] = float(res)
else: results_segmentation[0] = 0.0
# perform PropSeg new version
sct.run('rm -rf '+output_dir+'new')
sct.create_folder(output_dir+'new')
cmd = 'sct_propseg -i ' + fname_input \
+ ' -o ' + output_dir+'new' \
+ ' -t ' + image_type \
+ ' -detect-nii'
sct.printv(cmd)
status_propseg_new, output_propseg_new = commands.getstatusoutput(cmd)
sct.printv(output_propseg_new)
# check if spinal cord is correctly detected with new version of PropSeg
cmd = "isct_check_detection.py -i "+detection_filename_new+" -t "+manual_segmentation_filename_new
sct.printv(cmd)
status_detection_new, output_detection_new = commands.getstatusoutput(cmd)
sct.printv(output_detection_new)
results_detection[1] = status_detection_new
# compute Dice coefficient for new version of PropSeg
cmd_validation = 'sct_dice_coefficient '+segmentation_filename_new \
+ ' '+manual_segmentation_filename_new \
+ ' -bzmax'
sct.printv(cmd_validation)
status_validation_new, output_validation_new = commands.getstatusoutput(cmd_validation)
print output_validation_new
res = output_validation_new.split()[-1]
if res != 'nan': results_segmentation[1] = float(res)
else: results_segmentation[1] = 0.0
return results_detection, results_segmentation
if __name__ == "__main__":
# call main function
param = param()
main() | mit | 4,765,003,380,247,198,000 | 40.583333 | 173 | 0.556308 | false | 3.517288 | false | false | false |
andykimpe/chromium-test-npapi | tools/telemetry/telemetry/page/actions/repaint_continuously.py | 8 | 1481 | # Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import time
from telemetry.page.actions import page_action
class RepaintContinuouslyAction(page_action.PageAction):
""" Continuously repaints the visible content by requesting animation frames
until self.seconds have elapsed AND at least three RAFs have been fired. Times
out after max(60, self.seconds) seconds if fewer than three RAFs were fired.
"""
def __init__(self, attributes=None):
super(RepaintContinuouslyAction, self).__init__(attributes)
def RunAction(self, tab):
assert(hasattr(self, 'seconds'))
start_time = time.time()
tab.ExecuteJavaScript(
'window.__rafCount = 0;'
'window.__rafFunction = function() {'
'window.__rafCount += 1;'
'window.webkitRequestAnimationFrame(window.__rafFunction);'
'};'
'window.webkitRequestAnimationFrame(window.__rafFunction);')
time_out = max(60, self.seconds)
min_rafs = 3
# Wait until at least self.seconds have elapsed AND min_rafs have been fired.
# Use a hard time-out after 60 seconds (or self.seconds).
while True:
raf_count = tab.EvaluateJavaScript('window.__rafCount;')
elapsed_time = time.time() - start_time
if elapsed_time > time_out:
break
elif elapsed_time > self.seconds and raf_count > min_rafs:
break
time.sleep(1)
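# Illustrative configuration (editor's sketch; 'seconds' is taken from the assert above and
# 'tab' is assumed to be the telemetry Tab object supplied by the page runner):
#   action = RepaintContinuouslyAction({'seconds': 10})
#   action.RunAction(tab)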
| bsd-3-clause | 3,875,948,258,636,553,000 | 36.025 | 80 | 0.681972 | false | 3.807198 | false | false | false |
rapidpro/tracpro | tracpro/contacts/migrations/0015_auto_20170307_1338.py | 1 | 1034 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('contacts', '0014_auto_20170210_1659'),
]
operations = [
migrations.AlterField(
model_name='contact',
name='group',
field=models.ForeignKey(related_name='contacts', verbose_name='Cohort', to='groups.Group', help_text='Cohort to which this contact belongs.', null=True),
),
migrations.AlterField(
model_name='contact',
name='groups',
field=models.ManyToManyField(help_text='All cohorts to which this contact belongs.', related_name='all_contacts', verbose_name='Cohorts', to='groups.Group'),
),
migrations.AlterField(
model_name='contact',
name='region',
field=models.ForeignKey(related_name='contacts', verbose_name='Panel', to='groups.Region', help_text='Panel of this contact.'),
),
]
| bsd-3-clause | -6,078,903,323,623,347,000 | 34.655172 | 169 | 0.609284 | false | 4.362869 | false | false | false |
indradhanush/U1DB-ZeroMQ-Transport | zmq_transport/tests/test_server.py | 1 | 5505 | # # System Imports
# import unittest
# import mock
# from mock import patch
# # ZeroMQ Imports
# import zmq
# from zmq.eventloop.ioloop import IOLoop
# # Local Imports
# import zmq_transport
# from zmq_transport.server.zmq_server import (
# ServerSocket,
# ApplicationHandler,
# ClientHandler,
# Publisher,
# Server
# )
# from zmq_transport.config.settings import (
# ENDPOINT_APPLICATION_HANDLER,
# ENDPOINT_CLIENT_HANDLER,
# ENDPOINT_PUBLISHER
# )
# class BaseServerTest(unittest.TestCase):
# """
# Base test class for server.
# """
# def __init__(self, *args, **kwargs):
# unittest.TestCase.__init__(self, *args, **kwargs)
# self.endpoint = "tcp://127.0.0.1:6789"
# def tearDown(self):
# pass
# class ServerSocketTest(BaseServerTest):
# """
# Test class for zmq_transport.server.zmq_server.ServerSocket
# """
# def test_serverSocket_run(self):
# """
# Tests ServerSocket.run
# """
# # Mocking zmq
# context = mock.Mock(spec_set=zmq.Context)
# socket = mock.Mock(spec_set=zmq.ROUTER)
# server_sock = ServerSocket(context.socket(socket), self.endpoint)
# server_sock.run()
# class ApplicationHandlerTest(BaseServerTest):
# """
# Test class for zmq_transport.server.zmq_server.ApplicationHandler
# """
# def test_applicationHandler_run(self):
# """
# Tests ApplicationHandler.run
# """
# # Mocking zmq
# context = mock.Mock(spec_set=zmq.Context)
# app_sock = ApplicationHandler(self.endpoint, context)
# app_sock.run()
# class ClientHandlerTest(BaseServerTest):
# """
# Test class for zmq_transport.server.zmq_server.ClientHandler
# """
# def test_clientHandler_run(self):
# """
# Tests ClientHandler.run
# """
# # Mocking zmq
# context = mock.Mock(spec_set=zmq.Context)
# client_sock = ClientHandler(self.endpoint, context)
# client_sock.run()
# class PublisherTest(BaseServerTest):
# """
# Test class for zmq_transport.server.zmq_server.Publisher
# """
# def test_publisher_run(self):
# """
# Tests Publisher.run
# """
# # Mocking zmq
# context = mock.Mock(spec_set=zmq.Context)
# pub_mock = Publisher(self.endpoint, context)
# pub_mock.run()
# class ServerTest(BaseServerTest):
# """
# Test class for zmq_transport.server.zmq_server.Server
# """
# def setUp(self):
# self.frontend_patcher = patch(
# "zmq_transport.server.zmq_server.ClientHandler")
# self.backend_patcher = patch(
# "zmq_transport.server.zmq_server.ApplicationHandler")
# self.publisher_patcher = patch(
# "zmq_transport.server.zmq_server.Publisher")
# self.frontend_mock = self.frontend_patcher.start()
# self.backend_mock = self.backend_patcher.start()
# self.publisher_mock = self.publisher_patcher.start()
# def check_mocks(self, server):
# # Check if classes were correctly mocked.
# assert zmq_transport.server.zmq_server.ClientHandler is\
# self.frontend_mock
# assert zmq_transport.server.zmq_server.ApplicationHandler is\
# self.backend_mock
# assert zmq_transport.server.zmq_server.Publisher is\
# self.publisher_mock
# assert server.frontend is self.frontend_mock
# assert server.backend is self.backend_mock
# assert server.publisher is self.publisher_mock
# def test_server__prepare_reactor(self):
# """
# Tests Server._prepare_reactor
# """
# server = Server(ENDPOINT_APPLICATION_HANDLER, ENDPOINT_CLIENT_HANDLER,
# ENDPOINT_PUBLISHER)
# # Patch Server instance.
# server._context = mock.Mock(spec_set=zmq.Context)
# server._loop = mock.Mock(spec_set=IOLoop)
# server.frontend = self.frontend_mock
# server.backend = self.backend_mock
# server.publisher = self.publisher_mock
# self.check_mocks(server)
# with patch("zmq.eventloop.zmqstream.ZMQStream") as zmqstream_mock:
# server._prepare_reactor()
# # TODO: Check if zmqstream_mock is called within wrap_zmqstream
# self.assertEqual(server.frontend.wrap_zmqstream.called, True)
# self.assertEqual(server.publisher.wrap_zmqstream.called, True)
# self.assertEqual(server.backend.wrap_zmqstream.called, True)
# expected = [(("on_send", server.handle_snd_update_client), ),
# (("on_recv", server.handle_rcv_update_client), )]
# self.assertEqual(server.frontend.register_handler.call_args_list,
# expected)
# expected = [(("on_send", server.handle_snd_update_client), )]
# self.assertEqual(server.publisher.register_handler.call_args_list,
# expected)
# expected = [(("on_send", server.handle_snd_update_app), ),
# (("on_recv", server.handle_rcv_update_app), )]
# self.assertEqual(server.backend.register_handler.call_args_list,
# expected)
# def tearDown(self):
# self.frontend_patcher.stop()
# self.backend_patcher.stop()
# self.publisher_patcher.stop()
| gpl-2.0 | 6,322,063,380,592,175,000 | 31.767857 | 80 | 0.597275 | false | 3.560802 | true | false | false |
midvik/versionparser | main.py | 1 | 8709 | # -*- coding: utf-8 -*-
import requests
import sqlite3
from distutils.version import LooseVersion
import re
from bs4 import BeautifulSoup
import click
from tqdm import tqdm
import dominate
from dominate.tags import *
PORTAL_NAME = 'http://soft.mydiv.net'
DOWNLOAD_COM_SEARCH = 'http://download.cnet.com/1770-20_4-0.html?platform=Windows&searchtype=downloads&query='
SOFTPEDIA_SEARCH = 'http://win.softpedia.com/dyn-search.php?search_term='
def unique(seq):
return list(set(seq))
def get_programs_from_section(url):
result = []
soup = BeautifulSoup(download_page(url), "html.parser")
if not soup:
print("parse_site no soup!")
return result
for page_url in tqdm(get_section_pages(soup, url), desc='Parsing pages'):
ps = BeautifulSoup(download_page(page_url), "html.parser")
if not ps:
continue
for item in ps.findAll('a', {'class': 'itemname'}):
try:
result.append((PORTAL_NAME + item['href'], item.contents[0].strip(), item.span.string))
except (LookupError, AttributeError):
continue
return result
def save_program_to_db(site, program, version, sql_connection):
sql_connection.cursor().execute(
"INSERT INTO parsed(site, program, version) VALUES(?, ?, ?)", [site, program, version])
def get_section_pages(soup, url):
pages = []
page_nums = []
for raw_a in soup.findAll('td', {'class': 'page'}):
if not raw_a.text:
continue
page_num_text = raw_a.text
if page_num_text.encode('utf-8').strip() == u'···'.encode('utf-8').strip():
pass
else:
page_num = int(page_num_text)
if page_nums and (page_num - page_nums[-1]) > 1:
for i in range(page_nums[-1], page_num + 1):
pages.append(url + 'index' + str(i) + ".html")
page_nums.append(page_num)
pages.append(PORTAL_NAME + str(raw_a.a['href']))
pages = unique(pages)
pages.append(url)
return pages
def search_new_versions_by_db(sql_connection, engine):
for sql_row in tqdm(list(sql_connection.cursor().execute("SELECT program, version, site FROM parsed")),
desc='Finding updates'):
if len(sql_row) < 3:
continue
target_name, target_version, target_url = sql_row
search_page_soup = BeautifulSoup(download_page(engine + target_name), "html.parser")
if not search_page_soup:
continue
yield search_page_soup, target_name, target_version, target_url
def compare_versions_download_com(sql_connection, list_params, ver_params, content_index=None):
for search_page_soup, target_name, target_version, target_url in search_new_versions_by_db(sql_connection, DOWNLOAD_COM_SEARCH):
search_results_soup = search_page_soup.findAll(list_params[0], list_params[1])
for result in search_results_soup[:2]:
title = result.findAll('div', {'class': 'title OneLinkNoTx'})
if not title:
continue
found_name = title[0].string
found_url = result.a['href']
if target_name.lower() == found_name.lower():
found_page_soup = BeautifulSoup(download_page(found_url), "html.parser")
if not found_page_soup:
continue
if content_index:
found_version = found_page_soup.find(ver_params[0], ver_params[1]).contents[content_index]
else:
found_version = found_page_soup.find(ver_params[0], ver_params[1])
if found_version:
found_version = found_version.string
if not target_version or not found_version:
continue
yield target_name, target_version, found_name, found_version, target_url, found_url
def get_next_proxy():
while True:
with open("proxy.list", 'r') as f:
proxy_list = f.readlines()
for proxy in proxy_list:
yield f'http://{proxy}'.strip()
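# Editor's note: get_next_proxy() assumes a local "proxy.list" file with one proxy per line
# (e.g. "127.0.0.1:8080"); it cycles through the file forever, re-reading it on each pass, so
# next(get_next_proxy()) would yield "http://127.0.0.1:8080" for that line.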
def compare_versions_softpedia(sql_connection, list_params):
for search_page_soup, target_name, target_version, target_url in search_new_versions_by_db(sql_connection, SOFTPEDIA_SEARCH):
for result in search_page_soup.findAll(list_params[0], list_params[1])[:2]:
found_name = result.a.string
found_url = result.a['href']
if target_name.lower() == " ".join(found_name.lower().split(' ')[:-1]):
found_page_soup = BeautifulSoup(download_page(found_url), "html.parser")
if not found_page_soup:
continue
found_version = None
pattern = re.compile('var spjs_prog_version="(.*?)";')
scripts = found_page_soup.findAll('script')
for script in scripts:
match = pattern.search(str(script.string))
if match:
found_version = match.groups()[0]
if not target_version or not found_version:
continue
yield target_name, target_version, found_name, found_version, target_url, found_url
def download_page(page_url, num_tries=3, timeout=5, proxy={}, proxy_generator=get_next_proxy()):
def change_proxy(message):
proxy_address = next(proxy_generator)
print(f'{message}. Changing proxy to {proxy_address}')
proxy['http'] = proxy_address
found_page = ''
for _ in range(num_tries):
try:
found_page = requests.get(page_url, proxies=proxy, timeout=timeout).text
except requests.exceptions.Timeout:
change_proxy("Timeout")
continue
except requests.exceptions.ProxyError:
change_proxy("Proxy error")
continue
if not len(found_page):
change_proxy("Probably banned")
else:
break
return found_page
@click.command()
@click.option('--section_url', default='http://soft.mydiv.net/win/cname72/', help='MyDiv section URL.')
@click.option('--engine', default='softpedia', help='Where to search')
def parse_section(section_url, engine):
with sqlite3.connect('example.db') as sql_connection:
clear_db(sql_connection)
for site, program, version in get_programs_from_section(section_url):
save_program_to_db(site, program, version, sql_connection)
sql_connection.commit()
if engine == 'softpedia':
results = compare_versions_softpedia(sql_connection, ('h4', {'class': 'ln'}))
elif engine == 'download.com':
results = compare_versions_download_com(sql_connection, ('div', {'id': 'search-results'}),
('tr', {'id': 'specsPubVersion'}), 3)
else:
print("Unknown engine")
return 1
create_html_results(engine, results)
def clear_db(sql_connection):
sql_connection.cursor().execute("DELETE FROM parsed")
sql_connection.commit()
def create_html_results(engine, results):
with dominate.document(title=engine) as doc:
with doc.add(table()) as data_table:
attr(border=2)
table_header = tr()
table_header += th("MyDiv")
table_header += th("Version")
table_header += th("Search result")
table_header += th("Version")
data_table.add(table_header)
try:
for target_name, target_version, found_name, found_version, target_url, found_url in results:
try:
if LooseVersion(target_version.split()[0]) < LooseVersion(found_version.split()[0]):
data_row = tr()
data_row += td(a(target_name, href=target_url))
data_row += td(target_version)
data_row += td(a(found_name, href=found_url))
data_row += td(found_version)
data_table.add(data_row)
print("On MyDiv %s %s, on search %s %s " %
(target_name, target_version, found_name, found_version))
except TypeError:
print(f"Version comparison failed on {target_version} and {found_version}")
finally:
with open(engine + ".html", "w") as f:
f.write(doc.render())
def _main():
parse_section()
if __name__ == '__main__':
exit(_main())
| gpl-2.0 | -2,215,543,210,756,732,400 | 34.104839 | 132 | 0.568229 | false | 3.955475 | false | false | false |
liangjg/openmc | openmc/deplete/results_list.py | 2 | 6005 | import h5py
import numpy as np
from .results import Results, VERSION_RESULTS
from openmc.checkvalue import check_filetype_version, check_value
__all__ = ["ResultsList"]
class ResultsList(list):
"""A list of openmc.deplete.Results objects
It is recommended to use :meth:`from_hdf5` over
direct creation.
"""
@classmethod
def from_hdf5(cls, filename):
"""Load in depletion results from a previous file
Parameters
----------
filename : str
Path to depletion result file
Returns
-------
new : ResultsList
New instance of depletion results
"""
with h5py.File(str(filename), "r") as fh:
check_filetype_version(fh, 'depletion results', VERSION_RESULTS[0])
new = cls()
# Get number of results stored
n = fh["number"][...].shape[0]
for i in range(n):
new.append(Results.from_hdf5(fh, i))
return new
def get_atoms(self, mat, nuc, nuc_units="atoms", time_units="s"):
"""Get number of nuclides over time from a single material
.. note::
Initial values for some isotopes that do not appear in
initial concentrations may be non-zero, depending on the
value of :class:`openmc.deplete.Operator` ``dilute_initial``.
The :class:`openmc.deplete.Operator` adds isotopes according
to this setting, which can be set to zero.
Parameters
----------
mat : str
Material name to evaluate
nuc : str
Nuclide name to evaluate
nuc_units : {"atoms", "atom/b-cm", "atom/cm3"}, optional
Units for the returned concentration. Default is ``"atoms"``
.. versionadded:: 0.12
time_units : {"s", "min", "h", "d"}, optional
Units for the returned time array. Default is ``"s"`` to
return the value in seconds.
.. versionadded:: 0.12
Returns
-------
times : numpy.ndarray
Array of times in units of ``time_units``
concentrations : numpy.ndarray
Concentration of specified nuclide in units of ``nuc_units``
"""
check_value("time_units", time_units, {"s", "d", "min", "h"})
check_value("nuc_units", nuc_units,
{"atoms", "atom/b-cm", "atom/cm3"})
times = np.empty_like(self, dtype=float)
concentrations = np.empty_like(self, dtype=float)
# Evaluate value in each region
for i, result in enumerate(self):
times[i] = result.time[0]
concentrations[i] = result[0, mat, nuc]
# Unit conversions
if time_units == "d":
times /= (60 * 60 * 24)
elif time_units == "h":
times /= (60 * 60)
elif time_units == "min":
times /= 60
if nuc_units != "atoms":
# Divide by volume to get density
concentrations /= self[0].volume[mat]
if nuc_units == "atom/b-cm":
# 1 barn = 1e-24 cm^2
concentrations *= 1e-24
return times, concentrations
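# Example (editor's illustration; the file name and material/nuclide names are placeholders):
#   results = ResultsList.from_hdf5("depletion_results.h5")
#   days, atoms = results.get_atoms("1", "Xe135", time_units="d")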
def get_reaction_rate(self, mat, nuc, rx):
"""Get reaction rate in a single material/nuclide over time
.. note::
Initial values for some isotopes that do not appear in
initial concentrations may be non-zero, depending on the
value of :class:`openmc.deplete.Operator` ``dilute_initial``
The :class:`openmc.deplete.Operator` adds isotopes according
to this setting, which can be set to zero.
Parameters
----------
mat : str
Material name to evaluate
nuc : str
Nuclide name to evaluate
rx : str
Reaction rate to evaluate
Returns
-------
times : numpy.ndarray
Array of times in [s]
rates : numpy.ndarray
Array of reaction rates
"""
times = np.empty_like(self, dtype=float)
rates = np.empty_like(self, dtype=float)
# Evaluate value in each region
for i, result in enumerate(self):
times[i] = result.time[0]
rates[i] = result.rates[0].get(mat, nuc, rx) * result[0, mat, nuc]
return times, rates
def get_eigenvalue(self):
"""Evaluates the eigenvalue from a results list.
Returns
-------
times : numpy.ndarray
Array of times in [s]
eigenvalues : numpy.ndarray
k-eigenvalue at each time. Column 0
contains the eigenvalue, while column
1 contains the associated uncertainty
"""
times = np.empty_like(self, dtype=float)
eigenvalues = np.empty((len(self), 2), dtype=float)
# Get time/eigenvalue at each point
for i, result in enumerate(self):
times[i] = result.time[0]
eigenvalues[i] = result.k[0]
return times, eigenvalues
def get_depletion_time(self):
"""Return an array of the average time to deplete a material
.. note::
Will have one fewer row than other methods,
like :meth:`get_eigenvalue`, because no depletion
is performed at the final transport stage
Returns
-------
times : numpy.ndarray
Vector of average time to deplete a single material
across all processes and materials.
"""
times = np.empty(len(self) - 1)
# Need special logic because the predictor
# writes EOS values for step i as BOS values
# for step i+1
# The first proc_time may be zero
if self[0].proc_time > 0.0:
items = self[:-1]
else:
items = self[1:]
for ix, res in enumerate(items):
times[ix] = res.proc_time
return times
| mit | -7,802,270,205,093,695,000 | 29.953608 | 79 | 0.549875 | false | 4.225897 | false | false | false |
gion86/awlsim | awlsim/core/instructions/insn_ssd.py | 2 | 1673 | # -*- coding: utf-8 -*-
#
# AWL simulator - instructions
#
# Copyright 2012-2014 Michael Buesch <m@bues.ch>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
from __future__ import division, absolute_import, print_function, unicode_literals
from awlsim.common.compat import *
from awlsim.core.instructions.main import * #@nocy
from awlsim.core.operators import *
#from awlsim.core.instructions.main cimport * #@cy
class AwlInsn_SSD(AwlInsn): #+cdef
__slots__ = ()
def __init__(self, cpu, rawInsn):
AwlInsn.__init__(self, cpu, AwlInsn.TYPE_SSD, rawInsn)
self.assertOpCount((0, 1))
if self.ops:
self.ops[0].assertType(AwlOperator.IMM, 0, 255)
def run(self):
#@cy cdef S7StatusWord s
s = self.cpu.statusWord
accu1 = self.cpu.accu1.getSignedDWord()
if self.ops:
count = self.ops[0].value
else:
count = self.cpu.accu2.getByte()
if count <= 0:
return
count = min(count, 32)
s.A1, s.A0, s.OV = (accu1 >> (count - 1)) & 1, 0, 0
accu1 >>= count
self.cpu.accu1.setDWord(accu1)
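# Editor's note (illustrative): SSD is an arithmetic (sign-preserving) right shift of ACCU 1
# treated as a signed double word. For example, with ACCU1 = -8 (0xFFFFFFF8) and a shift count
# of 2, the result is -2, and A1 receives the last bit shifted out ((-8 >> 1) & 1 == 0 here).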
| gpl-2.0 | 2,685,739,630,047,624,700 | 29.981481 | 82 | 0.711895 | false | 3.036298 | false | false | false |
PrincetonML/SIF | examples/sim_sif.py | 1 | 1983 | import pickle, sys
sys.path.append('../src')
import data_io, sim_algo, eval, params
## run
wordfiles = [#'../data/paragram_sl999_small.txt', # need to download it from John Wieting's github (https://github.com/jwieting/iclr2016)
'../data/glove.840B.300d.txt' # need to download it first
]
weightfile = '../auxiliary_data/enwiki_vocab_min200.txt'
weightparas = [-1, 1e-3]#[-1,1e-1,1e-2,1e-3,1e-4]
rmpcs = [0,1]# [0,1,2]
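# Editor's note (assumption based on the SIF method): weightpara is the parameter "a" in the
# SIF word weight a / (a + p(w)), where p(w) is the word's corpus frequency estimated from
# weightfile; rmpc is the number of principal components removed from the sentence embeddings.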
params = params.params()
parr4para = {}
sarr4para = {}
for wordfile in wordfiles:
(words, We) = data_io.getWordmap(wordfile)
for weightpara in weightparas:
word2weight = data_io.getWordWeight(weightfile, weightpara)
weight4ind = data_io.getWeight(words, word2weight)
for rmpc in rmpcs:
print 'word vectors loaded from %s' % wordfile
print 'word weights computed from %s using parameter a=%f' % (weightfile, weightpara)
params.rmpc = rmpc
print 'remove the first %d principal components' % rmpc
## eval just one example dataset
parr, sarr = eval.sim_evaluate_one(We, words, weight4ind, sim_algo.weighted_average_sim_rmpc, params)
## eval all datasets; need to obtained datasets from John Wieting (https://github.com/jwieting/iclr2016)
# parr, sarr = eval.sim_evaluate_all(We, words, weight4ind, sim_algo.weighted_average_sim_rmpc, params)
paras = (wordfile, weightfile, weightpara, rmpc)
parr4para[paras] = parr
sarr4para[paras] = sarr
## save results
save_result = False #True
result_file = 'result/sim_sif.result'
comment4para = [ # need to align with the following loop
['word vector files', wordfiles], # comments and values,
['weight parameters', weightparas],
['remove principal component or not', rmpcs]
]
if save_result:
with open(result_file, 'w') as f:
pickle.dump([parr4para, sarr4para, comment4para] , f)
| mit | -3,068,620,019,999,747,000 | 41.108696 | 137 | 0.644478 | false | 3.12776 | false | false | false |
williballenthin/viv-utils | viv_utils/scripts/get_function_args.py | 1 | 1884 | import pprint
import logging
import viv_utils
import viv_utils.emulator_drivers
g_pp = pprint.PrettyPrinter()
class CallArgumentMonitor(viv_utils.emulator_drivers.Monitor):
""" collect call arguments to a target function during emulation """
def __init__(self, vw, target_fva):
""" :param target_fva: address of function whose arguments to monitor """
viv_utils.emulator_drivers.Monitor.__init__(self, vw)
self._fva = target_fva
self._calls = {}
def apicall(self, emu, op, pc, api, argv):
rv = self.getStackValue(emu, 0)
if pc == self._fva:
self._calls[rv] = argv
def getCalls(self):
""" get map of return value of function call to arguments to function call """
return self._calls.copy()
def emulate_function(vw, fva, target_fva):
""" run the given function while collecting arguments to a target function """
emu = vw.getEmulator()
d = viv_utils.emulator_drivers.FunctionRunnerEmulatorDriver(emu)
m = CallArgumentMonitor(vw, target_fva)
d.add_monitor(m)
d.runFunction(fva, maxhit=1)
for k, v in m.getCalls().iteritems():
print(hex(k) + ": " + str(v))
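# Example invocation (editor's guess from _main()'s signature; not documented upstream):
#   python get_function_args.py sample.exe 401000
# i.e. the binary to analyze and the hex address of the function whose callers' arguments
# should be collected.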
def _main(bin_path, ofva):
fva = int(ofva, 0x10)
logging.basicConfig(level=logging.DEBUG)
vw = viv_utils.getWorkspace(bin_path)
index = viv_utils.InstructionFunctionIndex(vw)
# optimization: avoid re-processing the same function repeatedly
called_fvas = set([])
for callerva in vw.getCallers(fva):
callerfva = index[callerva] # the address of the function that contains this instruction
if callerfva in called_fvas:
continue
emulate_function(vw, index[callerva], fva)
called_fvas.add(callerfva)
return
def main():
import sys
sys.exit(_main(*sys.argv[1:]))
if __name__ == "__main__":
main()
| apache-2.0 | -8,768,602,732,891,916,000 | 25.535211 | 97 | 0.644904 | false | 3.463235 | false | false | false |
Legilibre/SedLex | sedlex/template/__init__.py | 1 | 1464 | import jinja2
import os
import shutil
_ROOT = os.path.abspath(os.path.dirname(__file__))
def template_string(template, values):
template = os.path.join(_ROOT, template)
f = open(template, 'r')
e = jinja2.Environment(
loader=jinja2.FileSystemLoader(os.path.dirname(template))
)
t = e.from_string(f.read().decode('utf-8'))
f.close()
return t.render(values)
def template_file(template, values, out):
template = os.path.join(_ROOT, template)
r = template_string(template, values)
path = os.path.dirname(out)
if not os.path.exists(path):
os.makedirs(path)
f = open(out, 'w')
f.write(r.encode('utf-8') + "\n")
f.truncate()
f.close()
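# Illustrative usage (editor's sketch; the template name and values are hypothetical):
#   html = template_string('article.j2', {'title': 'Article 1'})
#   template_file('article.j2', {'title': 'Article 1'}, '/tmp/out/article.html')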
def template_dir(dir, values, out):
dir = os.path.join(_ROOT, dir)
templated_files = []
for root, dirs, files in os.walk(dir):
for name in files:
path = os.path.join(root, name)
out_path = out + path.replace(dir, '')
out_dir = os.path.dirname(out_path)
if not os.path.exists(out_dir):
os.makedirs(out_dir)
if name.endswith(".j2"):
out_path = out_path.replace('.j2', '')
template_file(path, values, out_path)
else:
if os.path.exists(out_path):
os.remove(out_path)
shutil.copy(path, out_path)
templated_files.append(out_path)
return templated_files
| agpl-3.0 | 8,459,923,865,238,287,000 | 29.5 | 65 | 0.571038 | false | 3.412587 | false | false | false |
qiang437587687/pythonBrother | Borther/ReStartLearn/XPathLearn.py | 1 | 2575 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
tip : 1. //       locate nodes starting from the root (anywhere in the document)
tip : 2. /        step down one level
tip : 3. /text()  extract the text content, i.e. convert the node content to str
tip : 4. /@xxx    extract the attribute content
"""
from lxml import etree
import re
import requests
import logging
import json
logging.basicConfig(level=logging.INFO)
# import sys
# reload(sys)
# sys.setdefaulten
html = requests.get('http://www.qiushibaike.com')
# html2 = requests('GET', 'http://www.qiushibaike.com')
# print(html.text)
selector = etree.HTML(html.text)
content = selector.xpath('//*[@id="content-left"]/div')
print(type(content))
print(len(content))
# print(content)
print('------')
for each in content:
cc = each.xpath('div[@class="content"]/text()') # look up "content" in each of the matched divs # note: the extracted text is split into separate items wherever a child node (<br> etc.) appears
# print('out cc %s' % cc)
if len(cc) >= 1:
# cc.replace('\n', '')
# cc.replace('\n', 'python')
dd = []
for str in cc:
# print('str first %s ' % str)
# print(len(str))
str = str.strip('\n')
dd.append(str)
# print('str after %s ' % str)
# print(len(str))
# print('in cc %s' % cc)
print('\n'.join(dd))
# print(json.loads(cc[0]))
# print(json.loads(cc[0].replace('<br>', '')))
# print(json.loads(cc[0].replace(' ', '')))
# print(type(dd))
# print(len(dd))
# print(dd)
# //*[@id="qiushi_tag_115320313"] //*[@id="content-left"] //*[@id="qiushi_tag_115320313"]
# //*[@id="qiushi_tag_115320313"]/div[2] article block untagged mb15 //*[@id="qiushi_tag_115320153"]/div[2]
# XPath practice
lhtml = '''
<?xml version="1.0" encoding="ISO-8859-1"?>
<bookstore>
<book>
<title lang="eng">Harry Potter</title>
<price>29.99</price>
</book>
<book>
<title lang="eng">Learning XML</title>
<price>39.95</price>
</book>
</bookstore>
'''
lselector = etree.HTML(lhtml)
content1 = lselector.xpath('bookstore') # select all child nodes of this node
# content1 = lselector.xpath('/html') # select starting from the root node (absolute path); rarely done in practice
# content1 = lselector.xpath('//section') # // selects matching nodes wherever they appear, regardless of position
# content1 = lselector.xpath('//section/.') # . selects the current node
# content1 = lselector.xpath('//section/..') # .. selects the parent node
print(type(content1))
print(len(content1))
for each in content1:
print(each.text)
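# Editor's addition (illustrative): tip 4 (/@xxx) from the header is not exercised above;
# extracting an attribute value looks like this and should yield ['eng', 'eng'] for the sample:
#   langs = lselector.xpath('//title/@lang')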
| mit | 8,156,506,987,589,587,000 | 18.922414 | 113 | 0.578105 | false | 2.24587 | false | false | false |
hnu2013wwj/DHCodes | RandomizedAlgorithm/sim_hash.py | 2 | 1486 |
# coding:utf-8
# http://blog.csdn.net/sdj222555/article/details/7970466
class SimHash:
def __init__(self, tokens='', bits=128):
self.bits = bits
self.hash = self.simhash(tokens)
def __str__(self):
return str(self.hash)
def simhash(self, tokens):
v = [0] * self.bits
for t in [self.string_hash(x) for x in tokens]:
for i in range(self.bits):
bitmask = 1 << i
if t & bitmask:
v[i] += 1
else:
v[i] -= 1
fingerprint = 0
for i in range(self.bits):
if v[i] >= 0:
fingerprint += 1 << i
return fingerprint
def hamming_distance(self, other):
x = (self.hash ^ other.hash) & ((1 << self.bits) - 1)
cnt = 0
while x:
cnt += 1
x &= x - 1
return cnt
def similarity(self, other):
a = float(self.hash)
b = float(other.hash)
if a > b:
return b / a
else:
return a / b
def string_hash(self, source):
if source == '':
return 0
else:
x = ord(source[0]) << 7
m = 1000003
mask = 2 ** self.bits - 1
for c in source:
x = ((x * m) ^ ord(c)) & mask
x ^= len(source)
if x == -1:
x = -2
return x
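# Example usage (editor's illustration):
#   a = SimHash('how are you i am fine'.split())
#   b = SimHash('how are you i am ok'.split())
#   print(a.hamming_distance(b), a.similarity(b))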
if __name__ == '__main__':
pass
| gpl-2.0 | 4,784,423,877,193,440,000 | 22.587302 | 61 | 0.419919 | false | 3.752525 | false | false | false |
juhaj/topics-python-in-research | codes/python/h5py_write_example.py | 1 | 2416 | import numpy
import h5py
import os
import tempfile
import cProfile
import pstats
def h5py_create(filename, datadict, compression):
'''Create a new HDF5 file called "filename" and save the values of "datadict" into it using its keys as
the dataset names; create an attribute called "compression" holding the value of "compression" parameter.'''
f = h5py.File(filename, mode="w")
attrvalue = "nothing interesting for now"
f.attrs.create("top-level-attribute", attrvalue, dtype="S{x}".format(x=len(attrvalue)))
for name,value in datadict.items():
ds = f.create_dataset(name, data=value, compression=compression, chunks=True)
ds.attrs.create("compression", str(compression), dtype="S{x}".format(x=len(str(compression))))
return
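# Editor's note: "compression" may be None (store uncompressed) or a filter name such as
# "gzip" or "szip" (when the local HDF5 build supports it); chunked storage (chunks=True)
# is used because HDF5 applies compression filters per chunk.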
def szip_available():
'''Try to create a dataset using szip: return True if it succeeds, False on ValueError (szip not available),
and let any other exception propagate.'''
import tempfile
tempf = tempfile.NamedTemporaryFile(dir=".")
f = h5py.File(tempf.name,"w")
try:
f.create_dataset("foo", shape=(10,10), dtype="f8", compression="szip")
except ValueError:
ret = False
else:
ret = True
finally:
f.close()
return ret
data=numpy.random.random((1000,1000,100))
tempfiles = [tempfile.NamedTemporaryFile(dir=".") for i in [0,1,2,3]]
cps = [cProfile.Profile() for i in range(len(tempfiles))]
if (szip_available()):
comp="szip"
else:
comp="gzip"
runs = [None] + 3*[comp]
for i,r in enumerate(runs):
if (i==2):
data[100:900,100:900,30:70]=0.0
if (i==3):
data = numpy.ones((1000,1000,100), dtype=numpy.float64)
cps[i].runcall(h5py_create, tempfiles[i].name, {"array_called_data":data}, r)
print('''Time spent writing hdf5 data and file sizes:
uncompressed random data: {uncompt:g}\t{uncomps}
{comp} compressed random data: {compt:g}\t{comps}
{comp} compressed semirandom data: {semit:g}\t{semis}
{comp} compressed zeros: {zerot:g}\t{zeros}'''.format(
uncomps=os.stat(tempfiles[0].name).st_size,
comps=os.stat(tempfiles[1].name).st_size,
semis=os.stat(tempfiles[2].name).st_size,
zeros=os.stat(tempfiles[3].name).st_size,
uncompt=pstats.Stats(cps[0]).total_tt,
compt=pstats.Stats(cps[1]).total_tt,
semit=pstats.Stats(cps[2]).total_tt,
zerot=pstats.Stats(cps[3]).total_tt,
comp=comp
))
| gpl-3.0 | 3,002,537,640,296,865,000 | 36.75 | 112 | 0.659768 | false | 3.15817 | false | false | false |